diff --git "a/attnserver.run_attnserver.slurm.sh.343214.out.log" "b/attnserver.run_attnserver.slurm.sh.343214.out.log" new file mode 100644--- /dev/null +++ "b/attnserver.run_attnserver.slurm.sh.343214.out.log" @@ -0,0 +1,13366 @@ +Running ctx_length=1024, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +Cleaning up checkpoint directory: gpt-checkpoint +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 
0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. 
None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 1024 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... 
False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 
60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 1024 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. 
True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... 
None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None +INFO:megatron.training.initialize:Setting logging level to 0 + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ 
None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 1024 + sequence_parallel ............................... False +INFO:megatron.training.initialize:Setting logging level to 0 + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. 
False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine +INFO:megatron.training.initialize:Setting logging level to 0 + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True +INFO:megatron.training.initialize:Setting logging level to 0 + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False +INFO:megatron.training.initialize:Setting logging level to 0 + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json +INFO:megatron.training.initialize:Setting logging level to 0 + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 
32 + yaml_cfg ........................................ None +INFO:megatron.training.initialize:Setting logging level to 0 +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.041 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.431 seconds +time to initialize megatron (seconds): 8.493 +[after megatron is initialized] datetime: 2025-06-21 21:27:34 +building GPT model ... 
+>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> embedding>>> decoder + +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +>>> embedding>>> embedding + +>>> decoder>>> decoder + +>>> output_layer>>> output_layer + +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, 
data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (144247808 elements, 144247808 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.word_embeddings.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, 
pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> embedding>>> output_layer + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=2048, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... 
False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... 
None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. 
None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed +INFO:megatron.training.initialize:Setting logging level to 0 + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True +INFO:megatron.training.initialize:Setting logging level to 0 + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False +INFO:megatron.training.initialize:Setting logging level to 0 + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... 
False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 +INFO:megatron.training.initialize:Setting logging level to 0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle +INFO:megatron.training.initialize:Setting logging level to 0 + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 
1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None +INFO:megatron.training.initialize:Setting logging level to 0 + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None +INFO:megatron.training.initialize:Setting logging level to 0 + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 
64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 +INFO:megatron.training.initialize:Setting logging level to 0 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True +INFO:megatron.training.initialize:Setting logging level to 0 + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 
32 +INFO:megatron.training.initialize:Setting logging level to 0 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 +INFO:megatron.training.initialize:Setting logging level to 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. 
None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False +INFO:megatron.training.initialize:Setting logging level to 0 + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... 
vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.049 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.354 seconds +time to initialize megatron (seconds): 8.025 +[after megatron is initialized] datetime: 2025-06-21 21:28:12 +building GPT model ... 
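The padded-vocab line above can be reproduced from the printed arguments alone. A minimal sketch, assuming the usual Megatron-LM rule of rounding the tokenizer vocabulary up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size (128 * 4 = 512 for this run):

def padded_vocab_size(orig_vocab_size, divisible_by, tp_size):
    multiple = divisible_by * tp_size                              # 128 * 4 = 512
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

assert padded_vocab_size(50257, 128, 4) == 50688                   # 431 dummy tokens, matching the log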
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> embedding +>>> decoder +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding>>> embedding + +>>> decoder +>>> decoder>>> output_layer + +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number 
of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (148442112 elements, 148442112 padded size): + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, 
optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=4096, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True +INFO:megatron.training.initialize:Setting logging level to 0 + add_qkv_bias .................................... 
True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... +INFO:megatron.training.initialize:Setting logging level to 0 + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 
1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. 
False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 +INFO:megatron.training.initialize:Setting logging level to 0 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 
60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... 
merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 
1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... 
False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. 
GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False +INFO:megatron.training.initialize:Setting logging level to 0 + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ 
None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.043 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.638 seconds +time to initialize megatron (seconds): 8.520 +[after megatron is initialized] datetime: 2025-06-21 21:28:53 +building GPT model ... 
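The per-rank parameter counts reported above for the first run (148442112, max_position_embeddings 2048) and below for the second run (156830720, max_position_embeddings 4096) differ by exactly 2048 * 4096 = 8388608, i.e. the extra learned absolute position embeddings for the longer context. A rough cross-check from the printed arguments, assuming tied word/output embeddings, position embeddings replicated across tensor-parallel ranks, and column-parallel QKV/fc1 with row-parallel proj/fc2 whose biases are not split (an illustrative sketch, not Megatron's own counting code):

def params_per_tp_rank(hidden, ffn, vocab_padded, max_pos, layers,
                       heads, query_groups, kv_channels, tp):
    emb = (vocab_padded // tp) * hidden + max_pos * hidden         # word + position embeddings
    qkv_out = (heads * kv_channels + 2 * query_groups * kv_channels) // tp
    per_layer = (
        2 * hidden                               # pre-attention layernorm weight + bias
        + qkv_out * hidden + qkv_out             # column-parallel QKV weight + bias
        + hidden * (hidden // tp) + hidden       # row-parallel proj weight + full bias
        + 2 * hidden                             # pre-MLP layernorm weight + bias
        + (ffn // tp) * hidden + ffn // tp       # column-parallel fc1 weight + bias
        + (ffn // tp) * hidden + hidden          # row-parallel fc2 weight + full bias
    )
    return emb + layers * per_layer + 2 * hidden                   # plus final layernorm

assert params_per_tp_rank(4096, 16384, 50688, 2048, 2, 64, 16, 64, 4) == 148442112
assert params_per_tp_rank(4096, 16384, 50688, 4096, 2, 64, 16, 64, 4) == 156830720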
+>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> 
embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (156830720 elements, 156830720 padded size): + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.final_layernorm.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.position_embeddings.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=8192, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 8 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 8 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 
0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. 
None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 8192 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... 
False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 
60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 8192 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. 
True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... 
None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ 
[] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 8192 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. 
None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
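The "setting number of microbatches to constant 1" line above is consistent with global_batch_size=1, micro_batch_size=1, and data-parallel size 1 from the argument dump. A small sketch of that calculation, assuming the usual global = micro * DP * num_microbatches relation (the function name is illustrative, not Megatron's API):

def constant_num_microbatches(global_batch_size, micro_batch_size, dp_size):
    # Samples consumed per optimizer step by one microbatch across all DP replicas.
    samples_per_step = micro_batch_size * dp_size
    # The global batch is assumed to divide evenly; here 1 // (1 * 1) == 1.
    assert global_batch_size % samples_per_step == 0
    return global_batch_size // samples_per_step

print(constant_num_microbatches(1, 1, 1))  # 1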
+INFO:megatron.training.initialize:Setting logging level to 0
+ > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.040 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.430 seconds
+time to initialize megatron (seconds): 8.506
+[after megatron is initialized] datetime: 2025-06-21 21:29:33
+building GPT model ...
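Before the per-rank model-building output below, it is worth sanity-checking the parallel topology: with the usual Megatron factorization world_size = TP * CP * PP * DP, the reported world size of 32 with TP=4, CP=8, PP=1 leaves a data-parallel size of 1, and each context-parallel rank keeps seq_len / CP tokens of every sample, which is what the "batch tensor after cp" shapes printed during training show. A quick sketch in plain Python, not Megatron code:

tp, cp, pp, world_size = 4, 8, 1, 32
dp = world_size // (tp * cp * pp)
assert tp * cp * pp * dp == world_size
print("data-parallel size:", dp)  # 1, as in the argument dump

# Context parallelism slices the sequence dimension: the [2, 16384] batches
# printed during training become [2, 16384 // 8] = [2, 2048] per CP rank.
full_seq = 16384
print(full_seq // cp)  # 2048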
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, 
pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (173607936 elements, 173607936 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> 
embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.11, 3.50) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:29:35 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=8192, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005436 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8324 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001937 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8320 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.001799 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8335 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:29:35 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1230.11, 1262.98) + train/valid/test-data-iterators-setup ..........: (18.08, 144.33) +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 21:29:35 +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor:batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +tokensbatch tensor: position_ids torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 
+batch tensor:           tokens torch.Size([2, 16384])
+batch tensor:           labels torch.Size([2, 16384])
+batch tensor:        loss_mask torch.Size([2, 16384])
+batch tensor:   attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor:     position_ids torch.Size([2, 16384])
+batch tensor after cp:           tokens torch.Size([2, 2048])
+batch tensor after cp:           labels torch.Size([2, 2048])
+batch tensor after cp:        loss_mask torch.Size([2, 2048])
+batch tensor after cp:   attention_mask torch.Size([2, 1, 2048, 16384])
+batch tensor after cp:     position_ids torch.Size([2, 2048])
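Each rank first sees the full 16384-token batch and then keeps only its context-parallel shard: the 16384-token sequence shrinks to 2048 tokens per rank (a factor of 8, the context-parallel degree implied by the shapes), while the attention mask keeps the full key length, going from [2, 1, 16384, 16384] to [2, 1, 2048, 16384]. The sketch below uses a simple contiguous split to reproduce those shapes; it is not Megatron's actual load-balanced CP chunking, only an illustration of the arithmetic:

```python
import torch

def slice_batch_for_cp(batch: dict, cp_size: int, cp_rank: int) -> dict:
    """Keep this rank's slice of the sequence dimension.

    Simplified contiguous split (illustrative only; Megatron assigns chunks
    differently to balance causal-attention work across CP ranks).
    """
    seq_len = batch["tokens"].size(1)      # 16384 in this log
    chunk = seq_len // cp_size             # 2048 with cp_size=8
    sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)

    out = {}
    for key, t in batch.items():
        if key == "attention_mask":
            # mask keeps the full key length: [b, 1, S, S] -> [b, 1, S/cp, S]
            out[key] = t[:, :, sl, :]
        else:
            # tokens/labels/loss_mask/position_ids: [b, S] -> [b, S/cp]
            out[key] = t[:, sl]
    return out

# scaled-down demo (the run in this log has seq_len=16384, cp_size=8 -> 2048 per rank)
demo = {
    "tokens": torch.zeros(2, 16, dtype=torch.long),
    "labels": torch.zeros(2, 16, dtype=torch.long),
    "loss_mask": torch.ones(2, 16),
    "attention_mask": torch.ones(2, 1, 16, 16, dtype=torch.bool),
    "position_ids": torch.arange(16).unsqueeze(0).expand(2, -1),
}
for name, t in slice_batch_for_cp(demo, cp_size=8, cp_rank=0).items():
    print("batch tensor after cp:", name, tuple(t.shape))
```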
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3418.0 | max reserved: 3418.0
+ [2025-06-21 21:29:52] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 16768.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3484.0 | max reserved: 3484.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3446.0 | max reserved: 3446.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3418.0 | max reserved: 3418.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3406.0 | max reserved: 3406.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3406.0 | max reserved: 3406.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3418.0 | max reserved: 3418.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3456.0 | max reserved: 3456.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3444.0 | max reserved: 3444.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3456.0 | max reserved: 3456.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3386.0 | max reserved: 3386.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3424.0 | max reserved: 3424.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3484.0 | max reserved: 3484.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3484.0 | max reserved: 3484.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3424.0 | max reserved: 3424.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3484.0 | max reserved: 3484.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3386.0 | max reserved: 3386.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3424.0 | max reserved: 3424.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3456.0 | max reserved: 3456.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3484.0 | max reserved: 3484.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3386.0 | max reserved: 3386.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3446.0 | max reserved: 3446.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3446.0 | max reserved: 3446.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3406.0 | max reserved: 3406.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3424.0 | max reserved: 3424.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3424.0 | max reserved: 3424.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3418.0 | max reserved: 3418.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3444.0 | max reserved: 3444.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3444.0 | max reserved: 3444.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3406.0 | max reserved: 3406.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3418.0 | max reserved: 3418.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 2713.64501953125 | max allocated: 2834.22705078125 | reserved: 3444.0 | max reserved: 3444.0
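The reported totals are consistent with simple arithmetic: 0.35B transformer plus 0.21B embedding parameters gives the 0.56B total, the most loaded shard of 0.1400B is the total divided by the 4-way tensor-parallel split used for this run, and 2403.18 MB matches roughly 18 bytes of weight-plus-optimizer state per parameter of that shard. The 18-bytes figure (fp16 parameter, fp32 main gradient, fp32 master weight, and two fp32 Adam moments) is the usual mixed-precision Adam accounting and is stated here as an assumption, not a dump of the script's own formula:

```python
# Sanity-check the reported parameter and memory numbers.
transformer_params = 0.35e9
embedding_params = 0.21e9
total_params = transformer_params + embedding_params           # 0.56e9
tp_size = 4
most_loaded_shard = total_params / tp_size                      # ~0.14e9

bytes_per_param = 18  # assumed: 2 fp16 param + 4 fp32 grad + 4 master + 8 Adam moments
weight_and_optimizer_mb = most_loaded_shard * bytes_per_param / 2**20

print(f"total params (B): {total_params / 1e9:.2f}")            # 0.56
print(f"most loaded shard (B): {most_loaded_shard / 1e9:.4f}")  # 0.1400
print(f"weight+optimizer (MB): {weight_and_optimizer_mb:.2f}")  # ~2403, matching the log
```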
+batch tensor:           tokens torch.Size([2, 16384])
+batch tensor:           labels torch.Size([2, 16384])
+batch tensor:        loss_mask torch.Size([2, 16384])
+batch tensor:   attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor:     position_ids torch.Size([2, 16384])
+batch tensor after cp:           tokens torch.Size([2, 2048])
+batch tensor after cp:           labels torch.Size([2, 2048])
+batch tensor after cp:        loss_mask torch.Size([2, 2048])
+batch tensor after cp:   attention_mask torch.Size([2, 1, 2048, 16384])
+batch tensor after cp:     position_ids torch.Size([2, 2048])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:29:52] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 403.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
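Each of the first iterations reports "number of skipped iterations: 1" while the loss scale halves (4294967296.0, then 2147483648.0, then 1073741824.0). That is the usual fp16 dynamic loss-scaling behaviour: a step whose scaled gradients overflow is skipped and the scale is reduced. A toy sketch of the halve-on-overflow rule, with assumed hyperparameters (backoff 0.5, growth after 1000 clean steps); this is not Megatron's exact grad scaler:

```python
class DynamicLossScaler:
    """Toy dynamic loss scaler: halve on overflow (skipping the step),
    grow after a run of overflow-free steps. Hyperparameters are assumed."""

    def __init__(self, init_scale=2.0**32, backoff=0.5, growth=2.0, growth_interval=1000):
        self.scale = init_scale
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Return True if the optimizer step should be applied."""
        if found_overflow:
            self.scale *= self.backoff   # halves the scale, as seen in the log
            self._good_steps = 0
            return False                 # this iteration counts as skipped
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return True

scaler = DynamicLossScaler()
for overflow in (True, True, True):      # three overflowing warm-up steps
    scaler.update(found_overflow=overflow)
    print(f"loss scale: {scaler.scale:.1f}")
```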
+batch tensor:           tokens torch.Size([2, 16384])
+batch tensor:           labels torch.Size([2, 16384])
+batch tensor:        loss_mask torch.Size([2, 16384])
+batch tensor:   attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor:     position_ids torch.Size([2, 16384])
+batch tensor after cp:           tokens torch.Size([2, 2048])
+batch tensor after cp:           labels torch.Size([2, 2048])
+batch tensor after cp:        loss_mask torch.Size([2, 2048])
+batch tensor after cp:   attention_mask torch.Size([2, 1, 2048, 16384])
+batch tensor after cp:     position_ids torch.Size([2, 2048])
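The "Start exporting trace N / Done exporting trace N" pairs indicate a per-iteration trace being written to disk. The log does not show which profiler produces it; if it were torch.profiler, a per-iteration Chrome-trace export would look roughly like the sketch below (the output paths, step function, and step logic are assumptions for illustration only):

```python
import torch
from torch.profiler import profile, ProfilerActivity

def train_step(step: int) -> None:
    # stand-in for one real training iteration
    torch.randn(1024, 1024) @ torch.randn(1024, 1024)

for iteration in range(3):
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        train_step(iteration)
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"trace_{iteration}.json")  # hypothetical path
    print(f"Done exporting trace {iteration}")
```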
attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:29:52] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 370.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor 
after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask 
torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) 
+batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after
cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:29:53] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 360.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor 
after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 
2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2,
1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: 
loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens 
torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:29:53] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 376.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: 
tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) 
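Every iteration report in this run shows "number of skipped iterations: 1" and a loss scale that halves each time (536870912 -> 268435456 -> 134217728 -> 67108864): with fp16 dynamic loss scaling, an iteration whose scaled gradients contain inf/NaN is skipped (no parameter update) and the scale is backed off by a factor of two before the next attempt. The sketch below illustrates that general backoff/growth-interval scheme; it is not Megatron's exact grad-scaler code, and the class and method names are illustrative only.

class DynamicLossScaler:
    # Halve the scale on overflow; grow it again after a run of clean steps.
    def __init__(self, initial_scale=2.0 ** 32, backoff_factor=0.5,
                 growth_factor=2.0, growth_interval=1000, min_scale=1.0):
        self.scale = initial_scale
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self.growth_interval = growth_interval
        self.min_scale = min_scale
        self._clean_steps = 0

    def update(self, found_inf_or_nan: bool) -> bool:
        # Returns True if the optimizer step should be applied this iteration.
        if found_inf_or_nan:
            # Overflow: skip the step (one "skipped iteration") and back off the scale.
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._clean_steps = 0
            return False
        self._clean_steps += 1
        if self._clean_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True

# Repeated overflows from a large initial scale produce exactly the halving
# pattern of the "loss scale" column in the iteration lines above.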
+batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels 
torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: 
labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels 
torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:29:54] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 352.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: 
labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 
2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor 
after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: 
loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask 
torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:29:54] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 359.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: 
loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask 
torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor 
after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch 
tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask 
torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:29:54] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 360.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: 
attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: 
attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch 
tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: attention_mask
torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch 
tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:29:55] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 362.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: 
labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) 
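Note: the "batch tensor" / "batch tensor after cp" pairs in this log come from the per-rank batch split under context parallelism: with CP_SIZE=8, each rank keeps 16384 / 8 = 2048 positions of the sequence, while the attention mask keeps all 16384 key positions, which is why the shapes go from [2, 16384] to [2, 2048] and the mask becomes [2, 1, 2048, 16384]. A minimal sketch of that slicing, assuming a plain contiguous split (Megatron's real implementation also load-balances the causal mask, and the helper name slice_batch_for_cp is hypothetical):

    import torch

    def slice_batch_for_cp(batch, cp_rank, cp_size):
        # Hypothetical helper, not Megatron's API: keep this rank's contiguous
        # chunk of the sequence dimension for every per-token tensor.
        seq_len = batch["tokens"].size(1)      # 16384 in the log above
        chunk = seq_len // cp_size             # 16384 // 8 = 2048
        sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
        sliced = {k: v[:, sl] for k, v in batch.items() if k != "attention_mask"}
        # Queries are local (2048) but still attend over the full 16384 keys,
        # hence the [2, 1, 2048, 16384] mask shape printed "after cp".
        sliced["attention_mask"] = batch["attention_mask"][:, :, sl, :]
        return sliced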
+batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids 
torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids
torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor 
after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:29:55] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 362.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:29:55 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.02616596221923828 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.026184797286987305 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.02620530128479004 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.026238203048706055 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.027130842208862305 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.02720952033996582 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.027365922927856445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.027484416961669922 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.027507305145263672 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.02755141258239746 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.02757406234741211 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.0276181697845459 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.027625322341918945 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.027687788009643555 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.02799081802368164 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.027541399002075195 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 
0.02754044532775879 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.02760791778564453 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.027648448944091797 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.027984619140625 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0281064510345459 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.028146982192993164 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.028193950653076172 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.028058767318725586 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.027671098709106445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.02785181999206543 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0278627872467041 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.02830362319946289 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.02852153778076172 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.028457164764404297 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.028670310974121094 to prepare state dict for ckpt +WARNING:megatron.core.dist_checkpointing.serialization:Overwriting old incomplete / corrupted checkpoint... +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.3754892349243164 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), 
(np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3674187660217285 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3386690616607666 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3896369934082031 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3917863368988037 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3388302326202393 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3917644023895264 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3388524055480957 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3919236660003662 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.366584300994873 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3459978103637695 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3897814750671387 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3444750308990479 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3670573234558105 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3667793273925781 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3446364402770996 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.339217185974121 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3923242092132568 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3461687564849854 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3390147686004639 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.358825922012329 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3734025955200195 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.359067440032959 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.347205638885498 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3927314281463623 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3394834995269775 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3925001621246338 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3461697101593018 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save 
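The distribute_shards_to_ranks lines above list (bytes, rank) pairs produced when the fully parallel save strategy spreads the checkpoint's shards across the 8 ranks that share a write group, so that the per-rank write work is roughly balanced. A rough sketch of a greedy balancing pass is below; the function name, the "largest shard to least-loaded rank" heuristic, and the shard sizes are assumptions for illustration, not Megatron's exact algorithm or data.

import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    """Illustrative greedy balancer: repeatedly give the largest remaining
    shard to the rank with the smallest byte total so far."""
    heap = [(0, rank) for rank in range(num_ranks)]   # (bytes_assigned, rank)
    heapq.heapify(heap)
    assignment = {rank: [] for rank in range(num_ranks)}
    for size in sorted(shard_sizes, reverse=True):
        total, rank = heapq.heappop(heap)
        assignment[rank].append(size)
        heapq.heappush(heap, (total + size, rank))
    # Return per-rank byte totals, analogous to the [(bytes, rank), ...]
    # pairs printed in the log above.
    return {rank: sum(sizes) for rank, sizes in assignment.items()}

# Hypothetical shard sizes (bytes), just to exercise the heuristic:
sizes = [104857600, 83886080, 67108864, 50331648, 33554432, 33554432,
         16777216, 16777216, 8388608, 8388608]
print(distribute_shards_to_ranks(sizes, num_ranks=8))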
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.393017053604126 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3931851387023926 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.367790699005127 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3411228656768799 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded 
global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.017200469970703125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded 
plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.006825923919677734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.010399341583251953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.00945425033569336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.009875297546386719 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.0024378299713134766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.006802082061767578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6653912 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6653907 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.008907079696655273 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.009191036224365234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.008671045303344727 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.00982356071472168 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.00911855697631836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.008998870849609375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.009335517883300781 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.010202169418334961 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.009603261947631836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.010278940200805664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6654003 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.010398387908935547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.666176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6661742 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665733 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.008725643157958984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6654124 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6661863 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665795 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.666574 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665235 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6654189 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.009728431701660156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.009311914443969727 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.00984048843383789 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665266 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6654162 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.008344411849975586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.009943485260009766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665342 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6654348 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.245208740234375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6662285 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.127357482910156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6665473 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.459785461425781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6662347 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.556510925292969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6662364 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.723403930664062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.010085105895996094 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.008987665176391602 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.6743621826171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.00816035270690918 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.009506225585938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6662462 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.724761962890625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.319450378417969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.9604644775390625e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.412101745605469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.103515625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.081031799316406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.817413330078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.666276 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.413459777832031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.413459777832031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.009294509887695312 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.666649 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.009788990020751953 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.318092346191406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.698204040527344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.842613220214844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.007422685623168945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6666017 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6666002 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.726119995117188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6666882 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6666875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.7738037109375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6667001 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.580352783203125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.557868957519531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.818771362304688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.009849786758422852 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.508827209472656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.891654968261719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6669073 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.298324584960938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.985664367675781e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.00978541374206543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6670673 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.392333984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.009171009063720703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541397.6697984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.054473876953125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05043363571166992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7162614 rank: 7, write(async) time: 0.05086207389831543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.050565242767333984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7164106 rank: 5, write(async) time: 0.05099940299987793 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051064491271972656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7180674 rank: 21, write(async) time: 0.05149078369140625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05162239074707031 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.718623 rank: 23, write(async) time: 0.05204653739929199 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05196547508239746 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7177227 rank: 1, write(async) time: 0.05230522155761719 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05200552940368652 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.719137 rank: 19, write(async) time: 0.05244898796081543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05212545394897461 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05218362808227539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7188325 rank: 27, write(async) time: 0.052602291107177734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7192676 rank: 18, write(async) time: 0.05256819725036621 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05267143249511719 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.052546024322509766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05252575874328613 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7194612 rank: 9, write(async) time: 0.05293750762939453 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7195 rank: 14, write(async) time: 0.052953481674194336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7197843 rank: 22, write(async) time: 0.05309438705444336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.053127288818359375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7197165 rank: 31, write(async) 
time: 0.05353093147277832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.053252220153808594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0534052848815918 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.719199 rank: 3, write(async) time: 0.05377912521362305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05321621894836426 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7198653 rank: 25, write(async) time: 0.053688764572143555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7199519 rank: 24, write(async) time: 0.053676605224609375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05364990234375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7202554 rank: 28, write(async) time: 0.05407857894897461 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05420279502868652 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05437326431274414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7211723 rank: 13, write(async) time: 0.054646968841552734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7213566 rank: 20, write(async) time: 0.05477714538574219 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05494236946105957 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7207828 rank: 4, write(async) time: 0.055350542068481445 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05509018898010254 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.722156 rank: 16, write(async) time: 0.05550742149353027 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05551028251647949 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7225451 rank: 10, write(async) time: 0.05594301223754883 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0564875602722168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05609011650085449 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.723529 rank: 17, write(async) time: 0.05693197250366211 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7234254 rank: 11, write(async) time: 0.05651974678039551 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0570676326751709 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7236903 rank: 29, write(async) time: 0.05745387077331543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057195186614990234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7246938 rank: 15, write(async) time: 0.057623863220214844 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05817914009094238 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7251246 rank: 8, write(async) time: 0.058591604232788086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05950665473937988 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7265377 rank: 12, write(async) time: 0.05993795394897461 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06590747833251953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7361772 rank: 0, write(async) time: 0.0663766860961914 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 3.1948089599609375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 3.123283386230469e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.10003018379211426 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.7667298 rank: 26, write(async) time: 0.10049009323120117 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 2.2172927856445312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 2.1219253540039062e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.03223991394042969 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.03266119956970215 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.03229069709777832 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.03203105926513672 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.03352975845336914 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.03186941146850586 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.031371116638183594 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.033832550048828125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.03243207931518555 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.03289365768432617 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.03703141212463379 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.03649544715881348 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.033399105072021484 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.036009788513183594 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.03662705421447754 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.034522056579589844 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.03234457969665527 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.033477067947387695 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03679466247558594 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.03171706199645996 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.035724639892578125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.030988454818725586 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03483939170837402 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.03653836250305176 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03295779228210449 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.3828277587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.15188956260681152 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.8185568 rank: 30, write(async) time: 0.15230822563171387 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.03170919418334961 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03351616859436035 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.03671145439147949 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.20260930061340332 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.20267057418823242 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.8684442 rank: 6, write(async) time: 0.20305085182189941 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541397.868493 rank: 2, write(async) time: 0.20309996604919434 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.03294014930725098 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22167552, before: 1700970496, after: 1723138048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22056960, before: 1695981568, after: 1718038528 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22163456, before: 1711771648, after: 1733935104 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22249472, before: 1712291840, after: 1734541312 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21938176, before: 1708859392, after: 1730797568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22282240, before: 1700147200, after: 1722429440 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 131072, before: 1695956992, after: 1696088064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 50569216, before: 1686593536, after: 1737162752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51290112, before: 1711419392, after: 1762709504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30449664, before: 1701253120, after: 1731702784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30400512, before: 1699786752, after: 1730187264 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30375936, before: 1700560896, after: 1730936832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04608750343322754 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51281920, before: 1704464384, after: 1755746304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51294208, before: 1685688320, after: 1736982528 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22024192, before: 1744007168, after: 1766031360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30420992, before: 1721483264, after: 1751904256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30265344, before: 1700192256, after: 1730457600 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30388224, before: 1729613824, after: 1760002048 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.032620906829833984 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47099904, before: 1711419392, after: 1758519296 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47079424, before: 1704468480, after: 1751547904 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46899200, before: 1686593536, after: 1733492736 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51572736, before: 1739141120, after: 1790713856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51654656, before: 1738252288, after: 1789906944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47034368, before: 1685688320, after: 1732722688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72142848, before: 1703432192, after: 1775575040 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72306688, before: 1738252288, after: 1810558976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72298496, before: 1757761536, after: 1830060032 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55513088, before: 1757863936, after: 1813377024 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.000656, rank: 27, write(sync,parallel): 0.20489144325256348 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55255040, before: 1703432192, after: 1758687232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51408896, before: 1712381952, after: 1763790848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72343552, before: 1708769280, after: 1781112832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72388608, before: 1701040128, after: 1773428736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72478720, before: 1700147200, after: 1772625920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1711771648, after: 1784201216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72527872, before: 1695903744, after: 1768431616 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.0338902473449707 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0144742, rank: 31, write(sync,parallel): 0.22182440757751465 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72409088, before: 1712291840, after: 1784700928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.015785, rank: 25, write(sync,parallel): 0.21688389778137207 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47370240, before: 1712304128, after: 1759674368 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51474432, before: 1689985024, after: 1741459456 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1729601536, after: 1802031104 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0329704, rank: 29, write(sync,parallel): 0.23063373565673828 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0332448, rank: 24, write(sync,parallel): 0.22786402702331543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 0, before: 1712508928, after: 1712508928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.28s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.04123, rank: 28, write(sync,parallel): 0.23192644119262695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55418880, before: 1702801408, after: 1758220288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0444891, rank: 22, write(sync,parallel): 0.253143310546875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0446768, rank: 17, write(sync,parallel): 0.2480182647705078 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72175616, before: 1739141120, after: 1811316736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72458240, before: 1700564992, after: 1773023232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0456948, rank: 21, write(sync,parallel): 0.25922369956970215 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0460622, rank: 16, write(sync,parallel): 0.23818230628967285 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72409088, before: 1721483264, after: 1793892352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0483425, rank: 18, write(sync,parallel): 0.2583198547363281 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 331776, before: 2017525760, after: 2017857536 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72339456, before: 1699794944, after: 1772134400 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0514488, rank: 19, write(sync,parallel): 0.2617952823638916 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47284224, before: 1689985024, after: 1737269248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72269824, before: 1700192256, after: 1772462080 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72396800, before: 1701253120, after: 1773649920 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished 
after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.056651, rank: 23, write(sync,parallel): 0.267871618270874 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0663943, rank: 26, write(sync,parallel): 0.22765636444091797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.073007, rank: 13, write(sync,parallel): 0.27812957763671875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 290816, before: 1687101440, after: 1687392256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72384512, before: 1702801408, after: 1775185920 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.085199, rank: 10, write(sync,parallel): 0.28429150581359863 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0880337, rank: 9, write(sync,parallel): 0.29433417320251465 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0885768, rank: 30, 
write(sync,parallel): 0.1990194320678711 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.0901396, rank: 14, write(sync,parallel): 0.2978057861328125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.092327, rank: 15, write(sync,parallel): 0.2895805835723877 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.093984, rank: 11, write(sync,parallel): 0.2939779758453369 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.097276, rank: 20, write(sync,parallel): 0.29378747940063477 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.1218894, rank: 12, write(sync,parallel): 0.310596227645874 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.27s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109133824, before: 1686265856, after: 1795399680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108785664, before: 1744007168, after: 1852792832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.208826, rank: 5, write(sync,parallel): 0.3938028812408447 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.229169, rank: 8, write(sync,parallel): 0.4101719856262207 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.47s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.49s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108949504, before: 1690578944, after: 1799528448 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 139415552, before: 1742290944, after: 1881706496 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.324148, rank: 7, write(sync,parallel): 0.5110940933227539 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.3318093, rank: 4, write(sync,parallel): 0.5121877193450928 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109219840, before: 1694167040, after: 1803386880 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.59s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.60s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.3913553, rank: 6, write(sync,parallel): 0.43096446990966797 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212041728, before: 1695956992, after: 1907998720 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.6139808, rank: 3, write(sync,parallel): 0.7567658424377441 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.84s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212430848, before: 1687101440, after: 1899532288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212062208, before: 1712484352, after: 1924546560 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212463616, before: 2017525760, after: 2229989376 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.8004165, rank: 2, write(sync,parallel): 0.7840192317962646 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.810729, rank: 1, write(sync,parallel): 0.9543359279632568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541398.82958, rank: 0, 
write(sync,parallel): 0.8786754608154297 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.86s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.04s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.99s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8939528, 1, gather: 0.03973889350891113 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8940187, 2, gather: 0.055001258850097656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8940065, 7, gather: 0.529634952545166 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8940005, 6, gather: 0.46277618408203125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.894078, 4, gather: 0.5106463432312012 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.894258, 5, gather: 0.6450400352478027 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.895414, 9, gather: 0.7684495449066162 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8954182, 10, gather: 0.7717673778533936 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8943012, 3, gather: 0.23576045036315918 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.895565, 13, gather: 0.7811012268066406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8957043, 25, gather: 0.838068962097168 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8958468, 26, gather: 0.7770626544952393 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8958457, 27, gather: 0.8545942306518555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8958423, 28, gather: 0.8148562908172607 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8959715, 19, gather: 0.8067224025726318 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8959756, 22, gather: 0.8140981197357178 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8960526, 17, gather: 0.8134162425994873 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8960528, 20, gather: 0.7563469409942627 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8960598, 21, gather: 0.8095591068267822 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8960545, 23, gather: 0.8005807399749756 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8957803, 15, gather: 0.7641894817352295 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8957934, 11, gather: 0.7629785537719727 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.896114, 30, gather: 0.7705087661743164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8961158, 29, gather: 0.8232340812683105 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8961132, 24, gather: 0.8225288391113281 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.896142, 31, gather: 0.8406660556793213 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8960972, 16, gather: 0.8117494583129883
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8958728, 8, gather: 0.6266906261444092
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8962123, 18, gather: 0.808762788772583
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8957927, 12, gather: 0.7353546619415283
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.8958883, 14, gather: 0.7653336524963379
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.9006317, 0, gather: 0.009641408920288086
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541398.911384, metadata_write: 0.010605812072753906
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0229s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0745s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0595s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2551s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5493s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5303s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4823s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6644s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8567s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7958s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8409s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8415s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8732s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7887s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8333s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8588s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8321s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8303s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8255s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8274s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7750s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8001s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7909s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7818s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7830s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6454s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8283s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7876s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7842s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8332s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8197s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7540s
+ successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ]
+DEBUG:megatron.training.checkpointing:rank: 28, takes 0.002107858657836914 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 30, takes 0.0020868778228759766 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0020711421966552734 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 31, takes 0.0020868778228759766 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 29, takes 0.002108335494995117 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 24, takes 0.002074003219604492 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 25, takes 0.0020401477813720703 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 26, takes 0.002026081085205078 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 16, takes 0.0020911693572998047 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 17, takes 0.0020949840545654297 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.002057790756225586 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.002078533172607422 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.0020599365234375 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.0020465850830078125 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.002071857452392578 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.0020973682403564453 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.002089977264404297 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0020873546600341797 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0020782947540283203 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002101421356201172 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 19, takes 0.0021691322326660156 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 22, takes 0.0022482872009277344 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 20, takes 0.002187490463256836 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0021283626556396484 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.002236604690551758 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.002215862274169922 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.002146482467651367 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0021333694458007812 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0021867752075195312 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.002188444137573242 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0021882057189941406 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002256631851196289 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([2, 16384])
+batch tensor: labels torch.Size([2, 16384])
+batch tensor: loss_mask torch.Size([2, 16384])
+batch tensor: attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([2, 16384])
+batch tensor after cp: tokens torch.Size([2, 2048])
+batch tensor after cp: labels torch.Size([2, 2048])
+batch tensor after cp: loss_mask torch.Size([2, 2048])
+batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([2, 2048])
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+ evaluate .......................................: (3072.13, 3073.45)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.139361E+01 | lm loss PPL: 8.875244E+04 |
+----------------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([2, 16384])
+batch tensor: labels torch.Size([2, 16384])
+batch tensor: loss_mask torch.Size([2, 16384])
+batch tensor: attention_mask torch.Size([2, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([2, 16384])
+batch tensor after cp: tokens torch.Size([2, 2048])
+batch tensor after cp: labels torch.Size([2, 2048])
+batch tensor after cp: loss_mask torch.Size([2, 2048])
+batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([2, 2048])
+batch tensor: position_ids
torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor: tokens torch.Size([2, 16384]) +batch tensor: labels torch.Size([2, 16384]) +batch tensor: loss_mask torch.Size([2, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor: attention_mask torch.Size([2, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([2, 16384]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: tokens torch.Size([2, 2048]) +batch tensor after cp: labels torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: loss_mask torch.Size([2, 2048]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([2, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +batch tensor after cp: position_ids torch.Size([2, 2048]) +Start exporting trace 11 +Done exporting 
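Note on the shapes printed above: with context_parallel_size=8, each context-parallel rank keeps only its 16384 / 8 = 2048-token slice of the sequence, while the attention mask keeps the full 16384-wide key dimension, which is why it shrinks from [2, 1, 16384, 16384] to [2, 1, 2048, 16384]. The snippet below is a minimal sketch of that slicing, assuming a plain contiguous split; the helper name shard_for_cp is illustrative only and this is not Megatron-LM's own CP batch-slicing code (which load-balances chunks for causal attention).

    import torch

    def shard_for_cp(batch, cp_size, cp_rank):
        # Keep this rank's contiguous slice of the sequence dimension.
        # tokens/labels/loss_mask/position_ids shrink to seq_len // cp_size;
        # the attention mask keeps the full key dimension (last dim).
        seq_len = batch["tokens"].size(1)
        chunk = seq_len // cp_size                    # e.g. 16384 // 8 = 2048
        s, e = cp_rank * chunk, (cp_rank + 1) * chunk
        return {
            "tokens": batch["tokens"][:, s:e],
            "labels": batch["labels"][:, s:e],
            "loss_mask": batch["loss_mask"][:, s:e],
            "attention_mask": batch["attention_mask"][:, :, s:e, :],
            "position_ids": batch["position_ids"][:, s:e],
        }

    # Scaled-down example (seq_len=64, cp_size=8) so it runs cheaply; the run
    # above uses batch=2, seq_len=16384, giving 2048-token slices per rank.
    full = {
        "tokens": torch.zeros(2, 64, dtype=torch.long),
        "labels": torch.zeros(2, 64, dtype=torch.long),
        "loss_mask": torch.ones(2, 64),
        "attention_mask": torch.ones(2, 1, 64, 64, dtype=torch.bool),
        "position_ids": torch.arange(64).expand(2, -1),
    }
    local = shard_for_cp(full, cp_size=8, cp_rank=0)
    assert local["tokens"].shape == (2, 8)
    assert local["attention_mask"].shape == (2, 1, 8, 64)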
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (363.87, 1718.64)
+----------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on test set | lm loss value: 1.139361E+01 | lm loss PPL: 8.875244E+04 |
+----------------------------------------------------------------------------------------------------------
+Running ctx_length=12288, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=2
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 12288
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+  account_for_embedding_in_pipeline_split ......... 
False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... 
False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 12288 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... 
None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 
120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... 
random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 12288 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ 
None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... 
None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 12288 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... 
None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 
0
+ world_size ...................................... 32
+ yaml_cfg ........................................ None
+-------------------- end of arguments ---------------------
+INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
+> building GPT2BPETokenizer tokenizer ...
+INFO:megatron.training.initialize:Setting logging level to 0
+ > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.048 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 5.839 seconds
+time to initialize megatron (seconds): 13.559
+[after megatron is initialized] datetime: 2025-06-21 21:30:47
+building GPT model ... 
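The "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above follows from make_vocab_size_divisible_by=128 and tensor_model_parallel_size=4 in the arguments: the GPT-2 vocabulary is rounded up to the next multiple of 128 * 4 = 512 so the embedding rows split evenly across the 4 tensor-parallel ranks (50688 / 4 = 12672 rows each). A quick check of that arithmetic, using an illustrative helper rather than Megatron-LM's actual padding function:

    def pad_vocab_size(orig_vocab_size, divisible_by=128, tp_size=4):
        # Round the vocabulary up to a multiple of divisible_by * tp_size.
        multiple = divisible_by * tp_size          # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    assert pad_vocab_size(50257) == 50688          # adds 50688 - 50257 = 431 dummy tokens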
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding 
+>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (190385152 elements, 190385152 padded size): + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.word_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (2.93, 3.45)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:30:48
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=12288, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005763 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5549
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001830 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5546
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001699 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5557
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 21:30:48
+done with setup ...
+training ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (776.15, 811.08)
+ train/valid/test-data-iterators-setup ..........: (19.36, 150.92)
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 21:30:48
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
[... the ten "batch tensor" / "batch tensor after cp" lines above are printed by each of the 32 ranks and appear interleaved in the raw log; the duplicate per-rank copies are omitted here ...]
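The "after cp" shapes above come from context parallelism: with cp_size=8, the padded sequence of 24576 tokens is split so each CP rank keeps 24576 / 8 = 3072 query positions, while the attention mask keeps the full key length. The sketch below is a simplified, hypothetical helper that reproduces those shapes; it is not Megatron-LM's actual `get_batch_on_this_cp_rank`, which additionally load-balances causal attention by assigning each rank two non-adjacent chunks.

```python
import torch

def get_batch_on_this_cp_rank_simple(batch, cp_rank, cp_size):
    """Keep this CP rank's contiguous slice of the sequence dimension.

    Hypothetical helper for illustration only; Megatron-LM's real implementation
    gives each rank two non-adjacent chunks for causal load balancing.
    """
    seq_len = batch["tokens"].size(1)
    assert seq_len % cp_size == 0
    chunk = seq_len // cp_size
    start, end = cp_rank * chunk, (cp_rank + 1) * chunk
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # [b, 1, seq_q, seq_k]: only the query dimension is sharded,
            # which is why the log shows [2, 1, 3072, 24576].
            out[name] = t[:, :, start:end, :]
        else:
            # tokens / labels / loss_mask / position_ids are [b, seq].
            out[name] = t[:, start:end]
    return out

# Demo with the log's batch size but a shorter sequence (the real bool mask at
# 24576 x 24576 would be ~1.2 GB); the seq // cp_size ratio is the same.
b, s, cp = 2, 1024, 8
batch = {
    "tokens": torch.zeros(b, s, dtype=torch.long),
    "labels": torch.zeros(b, s, dtype=torch.long),
    "loss_mask": torch.ones(b, s),
    "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
    "position_ids": torch.arange(s).expand(b, s),
}
for name, t in get_batch_on_this_cp_rank_simple(batch, cp_rank=0, cp_size=cp).items():
    print("batch tensor after cp:", name, tuple(t.shape))
```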
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4706.0 | max reserved: 4706.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4706.0 | max reserved: 4706.0
+ [2025-06-21 21:31:02] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 14366.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4700.0 | max reserved: 4700.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4716.0 | max reserved: 4716.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4780.0 | max reserved: 4780.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4716.0 | max reserved: 4716.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4764.0 | max reserved: 4764.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4726.0 | max reserved: 4726.0
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4764.0 | max reserved: 4764.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4690.0 | max reserved: 4690.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4778.0 | max reserved: 4778.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4764.0 | max reserved: 4764.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4780.0 | max reserved: 4780.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4728.0 | max reserved: 4728.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4728.0 | max reserved: 4728.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4728.0 | max reserved: 4728.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4764.0 | max reserved: 4764.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4706.0 | max reserved: 4706.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4732.0 | max reserved: 4732.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4716.0 | max reserved: 4716.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4860.0 | max reserved: 4860.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4690.0 | max reserved: 4690.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4690.0 | max reserved: 4690.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4706.0 | max reserved: 4706.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4794.0 | max reserved: 4794.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4780.0 | max reserved: 4780.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4706.0 | max reserved: 4706.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4716.0 | max reserved: 4716.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4716.0 | max reserved: 4716.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4794.0 | max reserved: 4794.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4732.0 | max reserved: 4732.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 3626.13720703125 | max allocated: 4115.63720703125 | reserved: 4778.0 | max reserved: 4778.0
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
[... the ten "batch tensor" / "batch tensor after cp" lines above are printed by each of the 32 ranks and appear interleaved in the raw log; the duplicate per-rank copies are omitted here ...]
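A quick check of the parameter and memory summary reported above: the 0.56 B total parameters are sharded only by tensor parallelism here (TP=4, PP=1, DP=1; context parallelism replicates the weights), giving the reported 0.14 B per most-loaded shard. The snippet below is a hedged back-of-the-envelope reproduction of the "weight and optimizer" figure, assuming Megatron's usual ~18 bytes per parameter for mixed-precision Adam without the distributed optimizer (2 B fp16 weight, 4 B fp32 main grad, 4 B fp32 master weight, 4 B + 4 B Adam moments); that byte breakdown is an assumption, not something stated in this log.

```python
# Rough reconstruction of "Theoretical memory footprints: weight and optimizer=2403.18 MB".
params_most_loaded_shard = 0.56e9 / 4   # 0.56 B total params / TP_SIZE=4 ~= 0.14 B (as logged)
bytes_per_param = 18                    # assumed mixed-precision Adam accounting, non-distributed optimizer
weight_and_optimizer_mb = params_most_loaded_shard * bytes_per_param / 2**20
print(f"{weight_and_optimizer_mb:.2f} MB")  # ~2403 MB, consistent with the 2403.18 MB reported above
```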
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:31:03] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 807.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
[... the ten "batch tensor" / "batch tensor after cp" lines above are printed by each of the 32 ranks and appear interleaved in the raw log; the duplicate per-rank copies are omitted here ...]
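The iteration summaries show dynamic loss scaling at work: every early iteration overflows ("number of skipped iterations: 1") and the loss scale is halved, 4294967296.0 -> 2147483648.0 -> 1073741824.0, matching the initial_loss_scale, min_loss_scale and loss_scale_window values printed in the optimizer config earlier. The sketch below is a minimal illustration of that backoff/growth loop under those assumptions; it is not Megatron's DynamicGradScaler and deliberately omits the hysteresis mechanism.

```python
# Minimal sketch of fp16 dynamic loss scaling: halve on overflow, grow after a
# window of good steps. Illustrative only; not Megatron-LM's exact scaler.
class DynamicLossScaler:
    def __init__(self, initial_scale=4294967296.0, min_scale=1.0,
                 growth_interval=1000, backoff_factor=0.5, growth_factor=2.0):
        self.scale = initial_scale
        self.min_scale = min_scale
        self.growth_interval = growth_interval
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self._good_steps = 0

    def update(self, found_inf: bool) -> bool:
        """Return True if the optimizer step should be applied."""
        if found_inf:
            # Skip the iteration and back off the scale (as in iterations 1-3 above).
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._good_steps = 0
            return False
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True

scaler = DynamicLossScaler()
for found_inf in (True, True, True):   # first three iterations overflow
    scaler.update(found_inf)
    print(int(scaler.scale))           # 2147483648, 1073741824, 536870912
```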
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:31:04] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 765.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
[... the ten "batch tensor" / "batch tensor after cp" lines above are printed by each of the 32 ranks and appear interleaved in the raw log; the duplicate per-rank copies are omitted here ...]
attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids 
torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: 
position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:31:05] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 777.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: 
position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) 
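Note on the shapes above: with CP_SIZE=8 each context-parallel rank keeps seq_len / cp_size = 24576 / 8 = 3072 positions of the per-token tensors (tokens, labels, loss_mask, position_ids), while attention_mask is sharded only along its query axis and keeps the full 24576 key positions, hence [2, 1, 3072, 24576]. The sketch below reproduces the logged "after cp" shapes; it assumes a plain contiguous split and a hypothetical helper name (slice_batch_for_cp_rank), not Megatron's actual load-balanced get_batch_on_this_cp_rank.

import torch

def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
    # Keep 1/cp_size of the sequence for per-token tensors; shard the attention
    # mask along its query axis only, keeping all key positions.
    seq_len = batch["tokens"].size(1)
    chunk = seq_len // cp_size                       # 24576 // 8 = 3072
    start, end = cp_rank * chunk, (cp_rank + 1) * chunk
    out = {k: batch[k][:, start:end]
           for k in ("tokens", "labels", "loss_mask", "position_ids")}
    out["attention_mask"] = batch["attention_mask"][:, :, start:end, :]
    return out

b, s = 2, 24576
batch = {
    "tokens": torch.zeros(b, s, dtype=torch.long),
    "labels": torch.zeros(b, s, dtype=torch.long),
    "loss_mask": torch.ones(b, s),
    "position_ids": torch.arange(s).repeat(b, 1),
    # full [2, 1, 24576, 24576] bool mask is ~1.2 GB; sized to match the log
    "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
}
for name, t in slice_batch_for_cp_rank(batch, cp_size=8, cp_rank=0).items():
    print("batch tensor after cp:", name, t.shape)
    # tokens/labels/loss_mask/position_ids -> [2, 3072]; attention_mask -> [2, 1, 3072, 24576]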
+batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 
3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids 
torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: 
tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:31:06] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 766.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch 
tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 
3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch 
tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: 
loss_mask torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor 
after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:31:06] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 769.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels 
torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) 
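The iteration summaries above also show the fp16 dynamic loss scale halving on every step (1073741824.0 -> 536870912.0 -> 268435456.0 -> 134217728.0) with one skipped iteration each time, i.e. the grad scaler keeps detecting inf/nan gradients and backing off. The sketch below illustrates that backoff/growth behavior with assumed names and defaults; it is a generic illustration of dynamic loss scaling, not Megatron's actual grad-scaler class.

import torch

class SimpleDynamicLossScaler:
    """Halve the scale and skip the step on inf/nan grads; double it again
    after growth_interval consecutive clean steps."""
    def __init__(self, init_scale=2.0**30, backoff=0.5, growth_interval=1000):
        self.scale = init_scale          # 2**30 = 1073741824.0, as printed at iteration 3
        self.backoff = backoff
        self.growth_interval = growth_interval
        self._good_steps = 0

    def should_step(self, grads):
        # Returns True if the optimizer step should be applied this iteration.
        found_inf = any(not torch.isfinite(g).all() for g in grads)
        if found_inf:
            self.scale *= self.backoff   # 1073741824.0 -> 536870912.0 -> ...
            self._good_steps = 0
            return False                 # counted as a skipped iteration in the log
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= 2.0
        return True

scaler = SimpleDynamicLossScaler()
bad_grads = [torch.tensor([float("inf")])]
for _ in range(4):
    scaler.should_step(bad_grads)
    print("loss scale:", scaler.scale)   # 536870912.0, 268435456.0, 134217728.0, 67108864.0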
+batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: 
loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:31:07] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 771.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
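The shape pairs above show what context parallelism does to each microbatch: with CP_SIZE=8, every rank keeps 24576 / 8 = 3072 tokens of the full 24576-token sequence, while the attention mask is sharded only along the query axis, going from [2, 1, 24576, 24576] to [2, 1, 3072, 24576]. The sketch below is a minimal illustration of that slicing under simplified assumptions: a contiguous per-rank slice, whereas the real implementation also reorders chunks for causal load balancing. slice_batch_for_cp and the toy sizes are illustrative, not the training code.

import torch

def slice_batch_for_cp(batch, cp_size, cp_rank):
    # Keep this CP rank's contiguous share of the sequence dimension.
    # 'attention_mask' is [b, 1, q, k]: only the query axis is sharded,
    # keys stay full length, matching the [2, 1, 3072, 24576] shape above.
    out = {}
    for key, t in batch.items():
        if key == 'attention_mask':
            chunk = t.shape[2] // cp_size
            out[key] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            chunk = t.shape[1] // cp_size
            out[key] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

# Toy sizes (the run above uses seq_len=24576, cp_size=8 -> 3072 per rank).
b, s, cp = 2, 64, 8
batch = {
    'tokens': torch.zeros(b, s, dtype=torch.long),
    'labels': torch.zeros(b, s, dtype=torch.long),
    'loss_mask': torch.ones(b, s),
    'attention_mask': torch.ones(b, 1, s, s, dtype=torch.bool),
    'position_ids': torch.arange(s).repeat(b, 1),
}
for k, v in slice_batch_for_cp(batch, cp_size=cp, cp_rank=0).items():
    print('batch tensor after cp:', k, tuple(v.shape))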
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:31:08] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 762.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
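The loss scale column halves at every iteration (67108864.0, then 33554432.0, 16777216.0, ...) while "number of skipped iterations" stays at 1: each fp16 step overflows, so the optimizer step is skipped and the dynamic loss scaler backs off. Below is a minimal sketch of that behaviour; the class name and the growth interval are illustrative constants, not Megatron's actual scaler.

class DynamicLossScaler:
    # Minimal sketch: halve on overflow (and skip the step), double again
    # after a run of overflow-free steps.
    def __init__(self, init_scale=67108864.0, growth_interval=1000):
        self.scale = init_scale
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow):
        if found_overflow:
            self.scale /= 2.0
            self._good_steps = 0
            return False          # caller skips optimizer.step()
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= 2.0
        return True

scaler = DynamicLossScaler()
for _ in range(3):
    scaler.update(found_overflow=True)
    print(scaler.scale)           # 33554432.0, 16777216.0, 8388608.0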
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:31:09] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 744.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:31:09] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 838.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:31:09
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.03443193435668945 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.034468889236450195 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.034493446350097656 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.03451061248779297 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.03453803062438965 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.03489232063293457 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03472256660461426 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 19, takes 0.03464865684509277 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.034670352935791016 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.034929752349853516 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.03477025032043457 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.034758567810058594 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.034651994705200195 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03479170799255371 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 22, takes 0.034714698791503906 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.035456180572509766 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 20, takes 0.035126447677612305 to prepare state dict for ckpt
ckpt +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.03535032272338867 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.03636908531188965 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.03680610656738281 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.03709983825683594 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.03718233108520508 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.037139892578125 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.03718447685241699 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.03737449645996094 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.037648677825927734 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.03762507438659668 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0378415584564209 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.046810150146484375 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.04732489585876465 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.049176931381225586 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.04919767379760742 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
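Aside (editor's note): the "batch tensor" / "batch tensor after cp" shapes logged above show what context parallelism does to each rank's batch: with CP_SIZE=8 the 24576-token sequence dimension shrinks to 3072 per rank, while the attention mask keeps its full 24576-wide key dimension. The sketch below illustrates that slicing under a simple contiguous split; it is not Megatron's get_batch_on_this_cp_rank, which additionally load-balances causal attention by assigning each rank two non-adjacent chunks.

# Minimal sketch of per-rank context-parallel batch slicing (assumption:
# contiguous split; Megatron's real implementation differs in chunk layout).
import torch

def slice_batch_for_cp(batch, cp_rank, cp_size):
    seq_len = batch["tokens"].size(1)
    chunk = seq_len // cp_size
    sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
    sliced = {}
    for key, tensor in batch.items():
        if key == "attention_mask":
            # mask is [b, 1, seq, seq]: slice the query axis, keep every key position
            sliced[key] = tensor[:, :, sl, :]
        else:
            # tokens / labels / loss_mask / position_ids are [b, seq]
            sliced[key] = tensor[:, sl]
    return sliced

if __name__ == "__main__":
    b, s, cp = 2, 64, 8            # tiny stand-ins for b=2, s=24576, cp=8 in the log
    batch = {
        "tokens": torch.zeros(b, s, dtype=torch.long),
        "labels": torch.zeros(b, s, dtype=torch.long),
        "loss_mask": torch.ones(b, s),
        "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
        "position_ids": torch.arange(s).repeat(b, 1),
    }
    local = slice_batch_for_cp(batch, cp_rank=0, cp_size=cp)
    print(local["tokens"].shape)          # [2, 8]; would be [2, 3072] at s=24576
    print(local["attention_mask"].shape)  # [2, 1, 8, 64]; [2, 1, 3072, 24576] at s=24576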
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), 
(np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), 
(np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(201326592), 1), (np.int64(124780544), 2), (np.int64(125829120), 3), (np.int64(125829120), 4), (np.int64(117646336), 5), (np.int64(117646336), 6), (np.int64(121634816), 7)] 
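Aside (editor's note): each "distribute_shards_to_ranks distribution" entry above is a list of (total_bytes, rank) pairs, i.e. how many checkpoint bytes each of the 8 ranks in a replication group will write under the fully parallel save strategy. The sketch below shows one way to obtain such a balanced assignment (largest shard to the least-loaded rank); it is a hedged illustration, not necessarily the exact algorithm in megatron.core.dist_checkpointing.exchange_utils.

# Greedy byte-balancing of shards across ranks (illustrative sizes only).
import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    heap = [(0, rank) for rank in range(num_ranks)]   # (assigned_bytes, rank)
    heapq.heapify(heap)
    assignment = {rank: [] for rank in range(num_ranks)}
    for shard_id, size in sorted(shard_sizes.items(), key=lambda kv: -kv[1]):
        assigned, rank = heapq.heappop(heap)           # least-loaded rank so far
        assignment[rank].append(shard_id)
        heapq.heappush(heap, (assigned + size, rank))
    totals = {rank: sum(shard_sizes[s] for s in shards)
              for rank, shards in assignment.items()}
    return assignment, totals

if __name__ == "__main__":
    sizes = {"embedding": 207_618_048, "layer0": 103_809_024,   # hypothetical shards
             "layer1": 92_274_688, "layer2": 92_274_688,
             "layer3": 88_080_384, "layer4": 83_919_872}
    _, totals = distribute_shards_to_ranks(sizes, num_ranks=8)
    print(sorted(totals.items()))   # per-rank byte totals, analogous to the log lines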
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.873645067214966 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.6602883338928223 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.9054996967315674 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.907268524169922 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.668045997619629 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.872561454772949 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.907688617706299 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.665239095687866 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.8749780654907227 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.9069995880126953 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.667062520980835 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.8746142387390137 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.660827398300171 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 4.020754098892212 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.668159246444702 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.874403476715088 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.661139488220215 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.660679817199707 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.906925678253174 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.669353723526001 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.8740439414978027 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.660057783126831 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.6670126914978027 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.9012835025787354 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.661435604095459 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.6701595783233643 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.874131679534912 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.661679744720459 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.867255210876465 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.9086849689483643 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.9091527462005615 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.01824808120727539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans 
passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.009601116180419922 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.007355928421020508 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.007712602615356445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.0037152767181396484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.009018182754516602 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.008938312530517578 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.009605646133422852 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.007795572280883789 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.008650779724121094 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.00899505615234375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.009373188018798828 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.009114742279052734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.008727073669433594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.007942676544189453 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.003756284713745117 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.007722616195678711 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3595932 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.009564638137817383 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.0077266693115234375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3607268 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.359615 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3596187 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3596191 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603423 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603468 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603556 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.00839543342590332 
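Aside (editor's note): the "verifying reuse of global metadata" / "loaded global metadata reuse verification: no loaded plans passed" entries correspond to checking whether save plans cached from a previous checkpoint can be reused so the global metadata does not have to be rebuilt; on this first save there is nothing to reuse. A hedged sketch of such a check follows, with purely illustrative names (the log does not expose the real API).

# Hypothetical reuse check: only reuse cached global metadata if every rank's
# new local plan matches the cached one; on a first save there are no cached
# plans, which matches "no loaded plans passed" above.
def can_reuse_global_metadata(cached_local_plans, new_local_plans):
    if not cached_local_plans:
        return False                                   # first save: nothing cached
    if len(cached_local_plans) != len(new_local_plans):
        return False
    return all(old == new for old, new in zip(cached_local_plans, new_local_plans))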
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3606136 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.009246349334716797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3606179 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.360735 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.360738 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3607402 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.359628 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3596256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603623 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3606453 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3606524 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.006604433059692383 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.745887756347656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3603888 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.00648045539855957 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.007249116897583008 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.007436513900756836 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3607893 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.224082946777344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.152557373046875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.963180541992188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.43865966796875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.818771362304688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.175041198730469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.008596420288085938 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.246566772460938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.104873657226562e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.866455078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.604194641113281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.0001068115234375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.581710815429688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.581710815429688e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.007929325103759766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.009041786193847656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.009227991104125977 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3608131 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3608048 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.002864837646484375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.175041198730469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.005076408386230469 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.0057985782623291016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.179115295410156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3606992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.918212890625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3608477 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3598726 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3604603 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.360461 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.250640869140625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3607063 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.224082946777344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.271766662597656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.775161743164062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010776519775390625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.699562072753906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.463859558105469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.942054748535156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.009371757507324219 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3607264 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.34600830078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.818771362304688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.3643022 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541474.360726 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.793571472167969e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.319450378417969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.273124694824219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.988380432128906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.655952453613281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.052919626235961914 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.412895 rank: 7, write(async) time: 0.0532686710357666 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05341815948486328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051621437072753906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051866769790649414 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05346822738647461 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4141982 rank: 25, write(async) time: 0.0538330078125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4126313 rank: 13, write(async) time: 0.05201864242553711 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05158829689025879 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4130185 rank: 22, write(async) time: 0.05228900909423828 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4135473 rank: 5, write(async) time: 0.05393409729003906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05356550216674805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4127455 rank: 14, write(async) time: 0.05202126502990723 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05223369598388672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.053858041763305664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4143221 rank: 31, write(async) time: 0.053978919982910156 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.052117109298706055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4133697 rank: 21, write(async) time: 0.05263471603393555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4138246 rank: 4, write(async) time: 0.05423450469970703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05384016036987305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.413218 rank: 15, write(async) time: 0.05257129669189453 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0532984733581543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057588815689086914 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4146445 rank: 28, write(async) time: 0.05430006980895996 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05573391914367676 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4144428 rank: 19, write(async) 
time: 0.053704261779785156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.417669 rank: 3, write(async) time: 0.058045387268066406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.058179378509521484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4167688 rank: 12, write(async) time: 0.056116580963134766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.054970502853393555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06052994728088379 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.418943 rank: 27, write(async) time: 0.05859851837158203 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05611848831176758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4162414 rank: 23, write(async) time: 0.05542898178100586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4207652 rank: 1, write(async) time: 0.06089138984680176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06135082244873047 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4171538 rank: 10, write(async) time: 0.05653500556945801 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05520486831665039 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4221618 rank: 24, write(async) time: 0.06180453300476074 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06132221221923828 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.416417 rank: 20, write(async) time: 0.05567479133605957 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.422446 rank: 11, write(async) time: 0.061738014221191406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05826616287231445 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.061916351318359375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4195318 rank: 18, write(async) time: 0.05872702598571777 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4230874 rank: 8, write(async) time: 0.062361717224121094 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06012320518493652 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4213762 rank: 16, write(async) time: 0.060526371002197266 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06467461585998535 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4256086 rank: 29, write(async) time: 0.06514930725097656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06197786331176758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.426702 rank: 0, write(async) time: 0.062392473220825195 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06794977188110352 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4291441 rank: 17, write(async) time: 0.06835556030273438 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, 
time: 0.06874585151672363 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.4298346 rank: 9, write(async) time: 0.0691368579864502 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 2.0742416381835938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 2.4557113647460938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 2.5987625122070312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.03210759162902832 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.3828277587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.03196239471435547 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.03244900703430176 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.03227734565734863 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.036687612533569336 to schedule async ckpt 
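Aside (editor's note): the "D2H and push", "finish D2H", "write(async)" and "schedule async ckpt" timings above reflect a two-stage save: tensors are first copied device-to-host, then the slow filesystem write is scheduled on background workers so training can continue. The sketch below shows that split with a plain torch.save in a worker process; it is an assumption-laden stand-in for Megatron's FileSystemWriterAsync, not its actual code.

# Minimal async-checkpoint sketch: D2H staging, then a background writer
# process; join() later corresponds to "collecting worker results".
import multiprocessing as mp
import torch

def _write_worker(cpu_state_dict, path):
    torch.save(cpu_state_dict, path)        # slow filesystem work off the main process

def schedule_async_save(state_dict, path):
    # D2H: detach and copy every tensor to host memory first
    cpu_state = {k: v.detach().cpu() if torch.is_tensor(v) else v
                 for k, v in state_dict.items()}
    proc = mp.get_context("spawn").Process(target=_write_worker, args=(cpu_state, path))
    proc.start()                             # "schedule async ckpt"
    return proc

if __name__ == "__main__":
    sd = {"weight": torch.randn(4, 4), "step": 10}
    worker = schedule_async_save(sd, "/tmp/demo_ckpt.pt")   # hypothetical path
    # ... training could proceed here while the worker writes ...
    worker.join()                            # "collecting worker results"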
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.033493995666503906 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.03287386894226074 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.030750274658203125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.03187966346740723 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.030743837356567383 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.03711199760437012 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03379178047180176 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.03702688217163086 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.14569711685180664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.5065506 rank: 26, write(async) time: 0.14616036415100098 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.039539337158203125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.03589963912963867 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.03241229057312012 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.03191733360290527 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.04553985595703125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.045827627182006836 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.04186868667602539 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03896212577819824 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03533577919006348 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.03957319259643555 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 2.09808349609375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.288818359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.002716064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.040222883224487305 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03278231620788574 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.20173001289367676 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.5626805 rank: 30, write(async) time: 0.20221996307373047 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.03551793098449707 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03934049606323242 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22306816, before: 1711767552, after: 1734074368 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.0329892635345459 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.22843408584594727 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.5884278 rank: 2, write(async) time: 0.22880959510803223 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.2285752296447754 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541474.5886385 rank: 6, write(async) time: 0.22902536392211914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22089728, before: 1735184384, after: 1757274112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 2.1457672119140625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22102016, before: 1717161984, after: 1739264000 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30343168, before: 1716940800, after: 1747283968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30396416, before: 1753370624, after: 1783767040 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22122496, before: 1712152576, after: 1734275072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30420992, before: 1716531200, after: 1746952192 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30466048, before: 1716793344, after: 1747259392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 208896, before: 1716842496, after: 1717051392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22089728, before: 1714221056, after: 1736310784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51372032, before: 1700933632, after: 1752305664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30326784, before: 1727488000, after: 1757814784 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21999616, before: 1726652416, after: 1748652032 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.043045759201049805 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51122176, before: 1731039232, after: 1782161408 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.04723501205444336 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51302400, before: 1723490304, after: 1774792704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30494720, before: 1728159744, after: 1758654464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47177728, before: 1700933632, after: 1748111360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1711767552, after: 1784197120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46927872, before: 1731039232, after: 1777967104 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.031670331954956055 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1735184384, after: 1807613952 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72458240, before: 1753370624, after: 1825828864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30371840, before: 1732763648, after: 1763135488 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47104000, before: 1723490304, after: 1770594304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51515392, before: 1727721472, after: 1779236864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 25878528, before: 1771622400, after: 1797500928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.5497207641601562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72368128, before: 1716531200, after: 1788899328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72298496, before: 1716936704, after: 1789235200 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72327168, before: 1716789248, after: 1789116416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72343552, before: 1727488000, after: 1799831552 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47206400, before: 1727721472, after: 1774927872 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 323584, before: 1724575744, after: 1724899328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72204288, before: 1714229248, after: 1786433536 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7068024, rank: 31, write(sync,parallel): 0.2065267562866211 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7083611, rank: 22, write(sync,parallel): 0.21968626976013184 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
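The "N consumed: X, before: Y, after: Z" entries in the surrounding DEBUG lines appear to report each checkpoint writer worker's process memory in bytes around its write; for every such line shown here, consumed equals after minus before. A minimal sketch, assuming only that log format (the helper name is illustrative, not a Megatron API), for turning the deltas into MiB when scanning a log like this one:

import re

CONSUMED = re.compile(r"consumed: (\d+), before: (\d+), after: (\d+)")

def consumed_mib(log_line: str):
    """Return the 'consumed' memory delta of one such DEBUG line in MiB, or None."""
    m = CONSUMED.search(log_line)
    if m is None:
        return None
    consumed = int(m.group(1))          # equals after - before in the lines above
    return consumed / (1024 * 1024)

# '1 consumed: 22306816, before: 1711767552, after: 1734074368' -> ~21.3 MiB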
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72445952, before: 1728167936, after: 1800613888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72368128, before: 1712152576, after: 1784520704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72224768, before: 1717239808, after: 1789464576 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.03125786781311035 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7170012, rank: 25, write(sync,parallel): 0.2136211395263672 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72175616, before: 1726681088, after: 1798856704 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7203863, rank: 13, write(sync,parallel): 0.2209322452545166 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.721198, rank: 21, write(sync,parallel): 0.22601675987243652 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7230644, rank: 27, write(sync,parallel): 0.20840072631835938 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72179712, before: 1759899648, after: 1832079360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7238672, rank: 10, write(sync,parallel): 0.21622538566589355 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7264302, rank: 14, write(sync,parallel): 0.23376798629760742 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55922688, before: 1729794048, after: 1785716736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72413184, before: 1729794048, after: 1802207232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7337089, rank: 15, write(sync,parallel): 0.23528504371643066 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7467391, rank: 29, write(sync,parallel): 0.2207024097442627 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7471063, rank: 11, write(sync,parallel): 0.2356429100036621 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7514462, rank: 18, write(sync,parallel): 0.24258708953857422 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7597651, rank: 9, write(sync,parallel): 0.2361152172088623 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7557018, rank: 23, write(sync,parallel): 0.24965357780456543 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72220672, before: 1754574848, after: 1826795520 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 64036864, before: 1759899648, after: 1823936512 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 311296, before: 2075398144, after: 2075709440 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72343552, before: 1744437248, after: 1816780800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.767792, rank: 17, write(sync,parallel): 0.2435591220855713 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51519488, before: 1727909888, after: 1779429376 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.768558, rank: 19, write(sync,parallel): 0.2697610855102539 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 59695104, before: 1744408576, after: 1804103680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.7865791, rank: 20, write(sync,parallel): 0.26906490325927734 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.8102932, rank: 16, write(sync,parallel): 0.2794806957244873 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 270336, before: 1703518208, after: 1703788544 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55750656, before: 1754574848, after: 1810325504 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 50421760, before: 1715154944, after: 1765576704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47194112, before: 1727897600, after: 1775091712 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105861120, before: 1732771840, after: 1838632960 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109002752, before: 1771634688, after: 1880637440 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47136768, before: 1715167232, 
after: 1762304000 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.8579185, rank: 28, write(sync,parallel): 0.3312835693359375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.86559, rank: 26, write(sync,parallel): 0.2732725143432617 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.8667011, rank: 24, write(sync,parallel): 0.33342957496643066 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.874086, rank: 12, write(sync,parallel): 0.34218573570251465 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.877045, rank: 8, write(sync,parallel): 0.33585166931152344 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541474.9033298, rank: 30, write(sync,parallel): 0.2458045482635498 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.43s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.43s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.44s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109207552, before: 1721409536, after: 1830617088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.0483139, rank: 5, write(sync,parallel): 0.5455255508422852 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109191168, before: 1706946560, after: 1816137728 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.62s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.1032343, rank: 7, write(sync,parallel): 0.6040756702423096 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.68s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109084672, before: 1719046144, after: 1828130816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.1940947, rank: 6, write(sync,parallel): 0.514742374420166 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.59s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 206278656, before: 1712513024, after: 1918791680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212279296, before: 1716850688, after: 1929129984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212287488, before: 1724575744, after: 1936863232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.3815367, rank: 4, write(sync,parallel): 0.81685471534729 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.384029, rank: 3, write(sync,parallel): 0.81380295753479 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.4117932, rank: 1, write(sync,parallel): 0.8398070335388184 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.89s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.89s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212279296, before: 2075443200, after: 2287722496 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.92s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.5134692, rank: 0, write(sync,parallel): 0.8595921993255615 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212262912, before: 1703518208, after: 1915781120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541475.5559285, rank: 2, write(sync,parallel): 0.8388931751251221 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.97s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.91s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5910294, 2, gather: 0.0021610260009765625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5912929, 1, gather: 
0.13801264762878418 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.592812, 11, gather: 0.7976722717285156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.592818, 9, gather: 0.7858588695526123 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5929732, 12, gather: 0.6713681221008301 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5929954, 13, gather: 0.8336405754089355 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5929952, 10, gather: 0.8300268650054932 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5930214, 14, gather: 0.8256642818450928 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5927756, 4, gather: 0.1709299087524414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593114, 15, gather: 0.8179335594177246 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593124, 8, gather: 0.6638121604919434 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5934396, 22, gather: 0.8443286418914795 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5934837, 16, gather: 0.7358160018920898 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5934713, 23, gather: 0.7879011631011963 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5929806, 6, gather: 0.3582754135131836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5934312, 25, gather: 0.8324880599975586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5936136, 19, gather: 0.7837650775909424 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5929542, 3, gather: 0.17021584510803223 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5935237, 26, gather: 0.6754636764526367 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5936532, 20, gather: 0.7642323970794678 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593582, 21, gather: 0.8299949169158936 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593063, 7, gather: 0.45209574699401855 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5935419, 27, gather: 0.8288850784301758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593936, 17, gather: 0.776461124420166 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593105, 5, gather: 0.5018661022186279 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5937312, 28, gather: 0.6863138675689697 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.593949, 18, gather: 0.804009199142456 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5938666, 24, gather: 0.6723580360412598 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5938783, 31, gather: 0.8432972431182861 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.594047, 29, gather: 0.799802303314209 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5941567, 30, gather: 0.6431891918182373 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.5958242, 0, gather: 
0.024123430252075195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541475.610817, metadata_write: 0.014853477478027344 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0242s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1598s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0418s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1903s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8636s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6926s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8493s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6632s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8197s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7068s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8530s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8190s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8069s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6850s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6927s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8388s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7968s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1912s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6959s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8510s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8471s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7567s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3784s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8547s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7850s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8043s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8245s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8507s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8653s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8088s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5219s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4719s +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.002217531204223633 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.002224445343017578 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.002216339111328125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.002184152603149414 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.0021648406982421875 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.0022199153900146484 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.002226591110229492 to finalize ckpt save 
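In the evaluation output below, each rank first prints the full batch ("batch tensor: tokens torch.Size([2, 24576])") and then its context-parallel slice ("batch tensor after cp: ... torch.Size([2, 3072])"): with CP_SIZE=8, the 24576-position sequence is divided so every CP rank keeps 24576 / 8 = 3072 positions, and the attention mask is cut only along its query dimension ([2, 1, 3072, 24576]). The following is a deliberately simplified, shape-level illustration of such a split along the sequence dimension; Megatron's real context-parallel slicing interleaves chunks for load balancing, and the function name here is hypothetical:

import torch

def naive_cp_slice(batch: torch.Tensor, cp_rank: int, cp_size: int, seq_dim: int = 1) -> torch.Tensor:
    """Keep one contiguous sequence chunk per context-parallel rank (illustration only)."""
    return batch.chunk(cp_size, dim=seq_dim)[cp_rank]

tokens = torch.zeros(2, 24576, dtype=torch.long)              # 'batch tensor: tokens'
print(naive_cp_slice(tokens, cp_rank=0, cp_size=8).shape)     # torch.Size([2, 3072])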
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.002241373062133789 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.002213001251220703 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.0022013187408447266 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.0022308826446533203 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.0022351741790771484 to finalize ckpt save + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0022072792053222656 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.002210378646850586 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.002228975296020508 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.002228975296020508 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.00228118896484375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.0022957324981689453 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.00223541259765625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.002239704132080078 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.002249479293823242 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0022554397583007812 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0022602081298828125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0021822452545166016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.002398967742919922 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.0023310184478759766 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.002414703369140625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0022878646850585938 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0022466182708740234 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0022628307342529297 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0022411346435546875 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002259969711303711 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens 
torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask 
torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels 
torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch 
tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])batch tensor: +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) + batch tensor after cp:tokens position_ids torch.Size([2, 3072]) +torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: 
tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 3072]) +batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576]) +batch tensor after cp: position_ids torch.Size([2, 3072]) +Start exporting trace 10 +Done exporting trace 10 +(min, max) time across ranks (ms): + evaluate .......................................: (3910.46, 3914.46) +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED + validation loss at iteration 10 on validation set | lm loss value: 1.094642E+01 | lm loss PPL: 5.675044E+04 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([2, 24576]) +batch tensor: labels torch.Size([2, 24576]) +batch tensor: loss_mask torch.Size([2, 24576]) +batch tensor: attention_mask torch.Size([2, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([2, 24576]) +batch tensor after cp: tokens torch.Size([2, 3072]) +batch tensor after cp: labels torch.Size([2, 3072]) +batch tensor after cp: loss_mask torch.Size([2, 
3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
+batch tensor: tokens torch.Size([2, 24576])
+batch tensor: labels torch.Size([2, 24576])
+batch tensor: loss_mask torch.Size([2, 24576])
+batch tensor: attention_mask torch.Size([2, 1, 24576, 24576])
+batch tensor: position_ids torch.Size([2, 24576])
+batch tensor after cp: tokens torch.Size([2, 3072])
+batch tensor after cp: labels torch.Size([2, 3072])
+batch tensor after cp: loss_mask torch.Size([2, 3072])
+batch tensor after cp: attention_mask torch.Size([2, 1, 3072, 24576])
+batch tensor after cp: position_ids torch.Size([2, 3072])
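The dump above records the effect of context parallelism on the evaluation batch: each batch tensor is built at the full per-rank sequence length of 24576 and then, with CP_SIZE=8, every CP rank keeps only 24576/8 = 3072 of those positions, while the attention mask retains all 24576 key positions in its last dimension. Below is a minimal sketch of that kind of slicing. It assumes a plain dict-of-tensors batch; the helper name slice_batch_for_cp_rank is hypothetical, and it uses a simple contiguous split, whereas the actual training code may reorder sequence chunks to balance causal attention across ranks.

# Minimal sketch, not Megatron's implementation: shard the sequence dimension
# across a context-parallel group so each rank keeps seq_len // cp_size tokens.
import torch

def slice_batch_for_cp_rank(batch, cp_size, cp_rank, seq_dims):
    """Keep only this CP rank's contiguous chunk of each tensor's sequence dim."""
    out = {}
    for key, tensor in batch.items():
        dim = seq_dims[key]
        chunk = tensor.size(dim) // cp_size          # e.g. 24576 // 8 = 3072
        out[key] = tensor.narrow(dim, cp_rank * chunk, chunk)
    return out

# Toy sizes (seq=64) so the example runs cheaply; the log uses seq=24576, cp=8.
seq, cp = 64, 8
batch = {
    "tokens":         torch.zeros(2, seq, dtype=torch.long),
    "labels":         torch.zeros(2, seq, dtype=torch.long),
    "loss_mask":      torch.ones(2, seq),
    "position_ids":   torch.arange(seq).repeat(2, 1),
    # The mask keeps all keys; only its query dimension (dim 2) gets sliced.
    "attention_mask": torch.ones(2, 1, seq, seq, dtype=torch.bool),
}
seq_dims = {"tokens": 1, "labels": 1, "loss_mask": 1, "position_ids": 1,
            "attention_mask": 2}

local = slice_batch_for_cp_rank(batch, cp_size=cp, cp_rank=0, seq_dims=seq_dims)
for k, v in local.items():
    print("batch tensor after cp:", k, tuple(v.shape))

Run on the toy sizes, this prints the same pattern as the "batch tensor after cp" lines above: (2, 8) for tokens, labels, loss_mask, and position_ids, and (2, 1, 8, 64) for the attention mask.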
+Start exporting trace 11
+Done exporting trace 11
+(min, max) time across ranks (ms):
+ evaluate .......................................: (743.44, 745.68)
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+----------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on test set | lm loss value: 1.094642E+01 | lm loss PPL: 5.675044E+04 |
+----------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
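As a sanity check on the validation report above, the PPL column is consistent with being the exponential of the reported lm loss; the standalone snippet below (plain Python, not taken from the training code) reproduces the logged value to within rounding of the printed loss.

import math

lm_loss = 1.094642e+01    # lm loss value from the validation report above
ppl = math.exp(lm_loss)   # perplexity as the exponential of the per-token LM loss
print(f"{ppl:.6E}")       # ~5.675044E+04, matching the logged lm loss PPL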