diff --git "a/attnserver.run_attnserver.slurm.sh.343222.out.log" "b/attnserver.run_attnserver.slurm.sh.343222.out.log" new file mode 100644--- /dev/null +++ "b/attnserver.run_attnserver.slurm.sh.343222.out.log" @@ -0,0 +1,8956 @@ +Running ctx_length=1024, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ 
False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... 
False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 1024 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. 
None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... 
False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 1024 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... 
None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ 
False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 
16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 1024 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ 
False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... 
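Note: the padded vocab line above (50257 -> 50688, 431 dummy tokens) follows from rounding the GPT-2 vocab up to a multiple of make_vocab_size_divisible_by (128) times the tensor-model-parallel size (4). A minimal sketch of that arithmetic, with an illustrative helper name rather than Megatron's actual function:

    def pad_vocab_size(orig_vocab_size, divisible_by=128, tp_size=4):
        # Round up so every tensor-parallel shard of the embedding gets an equal slice.
        multiple = divisible_by * tp_size                         # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    padded = pad_vocab_size(50257)                                # 50688
    dummy = padded - 50257                                        # 431 dummy tokens, as logged above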
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.114 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.387 seconds
+time to initialize megatron (seconds): 8.111
+[after megatron is initialized] datetime: 2025-06-21 21:28:25
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1
(144247808 elements, 144247808 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.34, 3.53) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:28:26 +> building train, validation, and test datasets ... 
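Note: the per-rank parameter count of 144247808 in the bucket summary above is consistent with the logged hyperparameters, assuming the standard Megatron GPT layout (tied embeddings, grouped-query attention, layernorms fused into the following linear). A rough cross-check, not taken from the log itself:

    tp = 4
    hidden, ffn, layers = 4096, 16384, 2
    heads, groups, kv_ch = 64, 16, 64
    vocab_padded, max_pos = 50688, 1024

    embedding = vocab_padded // tp * hidden + max_pos * hidden   # sharded word + replicated position embeddings
    qkv_out = (heads + 2 * groups) * kv_ch // tp                 # grouped-query QKV width per TP rank
    per_layer = (
        2 * hidden                                   # layernorm before attention
        + hidden * qkv_out + qkv_out                 # linear_qkv weight + bias
        + (heads * kv_ch // tp) * hidden + hidden    # linear_proj weight + bias
        + 2 * hidden                                 # layernorm before MLP
        + hidden * (ffn // tp) + ffn // tp           # linear_fc1 weight + bias
        + (ffn // tp) * hidden + hidden              # linear_fc2 weight + bias
    )
    total = embedding + layers * per_layer + 2 * hidden          # plus final layernorm
    print(total)                                                 # 144247808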
+ > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=1024, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.007373 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66592 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004479 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66562 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004075 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66686 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:28:26 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (532.80, 551.20) + train/valid/test-data-iterators-setup ..........: (22.31, 139.40) +training ... +Setting rerun_state_machine.current_iteration to 0... 
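Note: the iteration lines below report skipped iterations while the loss scale drops from 4294967296.0 to 2147483648.0. With fp16 training and dynamic loss scaling (see initial_loss_scale and loss_scale_window in the arguments above), an iteration whose gradients overflow is skipped and the scale is halved; after loss_scale_window overflow-free iterations the scale is raised again. A minimal sketch of that rule, not Megatron's actual scaler:

    class DynamicLossScaler:
        def __init__(self, initial_scale=4294967296.0, window=1000, min_scale=1.0):
            self.scale, self.window, self.min_scale = initial_scale, window, min_scale
            self.good_steps = 0

        def update(self, found_overflow):
            if found_overflow:
                # Skip the optimizer step for this iteration and back off the scale.
                self.scale = max(self.scale / 2.0, self.min_scale)
                self.good_steps = 0
            else:
                self.good_steps += 1
                if self.good_steps == self.window:
                    self.scale *= 2.0
                    self.good_steps = 0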
+[before the start of training step] datetime: 2025-06-21 21:28:26
+batch tensor:           tokens          torch.Size([8, 8192])
+batch tensor:           labels          torch.Size([8, 8192])
+batch tensor:           loss_mask       torch.Size([8, 8192])
+batch tensor:           attention_mask  torch.Size([8, 1, 8192, 8192])
+batch tensor:           position_ids    torch.Size([8, 8192])
+batch tensor after cp:  tokens          torch.Size([8, 2048])
+batch tensor after cp:  labels          torch.Size([8, 2048])
+batch tensor after cp:  loss_mask       torch.Size([8, 2048])
+batch tensor after cp:  attention_mask  torch.Size([8, 1, 2048, 8192])
+batch tensor after cp:  position_ids    torch.Size([8, 2048])
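Note: the "after cp" shapes above show each context-parallel rank keeping seq_len / CP_SIZE = 8192 / 4 = 2048 positions along the query dimension, while the attention mask still spans all 8192 keys. A rough sketch of that slicing; Megatron's get_batch_on_this_cp_rank additionally interleaves two chunks per rank to balance causal-attention work, which is omitted here:

    import torch

    def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
        out = {}
        for name, t in batch.items():
            # attention_mask is [b, 1, q, k]: slice the query dim (2); everything else is [b, s]: slice dim 1.
            seq_dim = 2 if name == "attention_mask" else 1
            chunk = t.size(seq_dim) // cp_size
            out[name] = t.narrow(seq_dim, cp_rank * chunk, chunk)
        return out

    # tokens [8, 8192] -> [8, 2048]; attention_mask [8, 1, 8192, 8192] -> [8, 1, 2048, 8192], matching the log.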
torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB +[Rank 0] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5754.0 | max reserved: 5754.0 + [2025-06-21 21:28:44] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 18527.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 2] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5754.0 | max reserved: 5754.0 +[Rank 11] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5974.0 | max reserved: 5974.0[Rank 10] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5974.0 | max reserved: 5974.0[Rank 13] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5786.0 | max reserved: 5786.0 +[Rank 14] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5914.0 | max reserved: 5914.0 + +[Rank 8] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5974.0 | max reserved: 5974.0 +[Rank 12] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5914.0 | max reserved: 5914.0 + +[Rank 1] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5754.0 | max reserved: 5754.0 +[Rank 9] (after 1 iterations) memory (MB) | allocated: 
2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5718.0 | max reserved: 5718.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5770.0 | max reserved: 5770.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5898.0 | max reserved: 5898.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5786.0 | max reserved: 5786.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5898.0 | max reserved: 5898.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5898.0 | max reserved: 5898.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 2442.84814453125 | max allocated: 5271.93408203125 | reserved: 5754.0 | max reserved: 5754.0
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:28:44] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 104.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
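The per-rank memory lines above can be reproduced from PyTorch's CUDA allocator statistics, converting bytes to MB. The sketch below is illustrative only; the report_memory helper name and exact formatting are assumptions, not the script's actual code.

```python
import torch

# Illustrative sketch: per-rank memory report in MB, mirroring the
# "[Rank N] (after 1 iterations) memory (MB) | ..." lines above.
def report_memory(rank: int, iteration: int) -> None:
    mb = 1024 * 1024
    print(f"[Rank {rank}] (after {iteration} iterations) memory (MB) | "
          f"allocated: {torch.cuda.memory_allocated() / mb} | "
          f"max allocated: {torch.cuda.max_memory_allocated() / mb} | "
          f"reserved: {torch.cuda.memory_reserved() / mb} | "
          f"max reserved: {torch.cuda.max_memory_reserved() / mb}")
```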
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:28:45] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 79.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
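The shape printouts show the effect of context parallelism: with CP_SIZE=4, each rank keeps an 8192 / 4 = 2048-token slice of tokens, labels, loss_mask and position_ids, while the attention mask keeps its full 8192-token key dimension ([8, 1, 2048, 8192]). Below is a minimal sketch of that slicing, assuming plain contiguous chunks; Megatron-LM's actual CP partitioning may differ (e.g. causal load balancing), and the function name and dict layout are assumptions.

```python
import torch

def slice_batch_for_cp(batch: dict, cp_rank: int, cp_size: int) -> dict:
    """Keep this rank's contiguous sequence chunk (illustrative only)."""
    seq_len = batch["tokens"].size(1)        # 8192 in this run
    chunk = seq_len // cp_size               # 2048 when cp_size == 4
    start, end = cp_rank * chunk, (cp_rank + 1) * chunk
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # [b, 1, seq, seq]: slice the query rows, keep every key column
            out[name] = t[:, :, start:end, :]
        else:
            # [b, seq] tensors: tokens, labels, loss_mask, position_ids
            out[name] = t[:, start:end]
    return out
```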
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:28:45] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 96.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
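Every iteration so far is reported as skipped, and the loss scale halves each time (2147483648 → 1073741824 → 536870912 → ...). That is the usual fp16 dynamic-loss-scaling backoff: when a gradient overflow is detected, the optimizer step is skipped and the scale is multiplied by a backoff factor. A minimal sketch, assuming an initial scale of 2**31, a backoff of 0.5 and a growth interval of 1000 good steps; Megatron-LM's exact constants and logic may differ.

```python
class DynamicLossScaler:
    """Illustrative fp16 loss scaler; not Megatron-LM's implementation."""

    def __init__(self, init_scale=2.0 ** 31, backoff=0.5, growth_interval=1000):
        self.scale = init_scale
        self.backoff = backoff
        self.growth_interval = growth_interval
        self.good_steps = 0

    def update(self, found_overflow: bool) -> None:
        if found_overflow:
            # Skip the optimizer step and shrink the scale, as seen in the log.
            self.scale *= self.backoff
            self.good_steps = 0
        else:
            self.good_steps += 1
            if self.good_steps % self.growth_interval == 0:
                self.scale *= 2.0
```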
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:28:45] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 79.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
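The "Start exporting trace N / Done exporting trace N" pairs indicate that a profiler trace is written after each step. A hedged sketch of how such a per-iteration Chrome trace can be produced with torch.profiler; the train_step callable and the file naming are assumptions, not the run's actual instrumentation.

```python
import torch
from torch.profiler import profile, ProfilerActivity

def profiled_step(train_step, iteration: int):
    # Profile one training step and export a Chrome trace, roughly matching
    # the "Start/Done exporting trace N" messages in this log.
    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
        train_step()
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"trace_{iteration}.json")
    print(f"Done exporting trace {iteration}")
```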
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:28:45] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 80.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
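The full attention mask [8, 1, 8192, 8192] is by far the largest tensor in the batch; slicing its query dimension down to 2048 cuts it by 4x per rank. A back-of-the-envelope estimate, assuming 1 byte per element (e.g. torch.bool); the dtype is my assumption, not stated in the log.

```python
# Element counts for the mask before and after the CP slice (shapes from the log).
full_mask = 8 * 1 * 8192 * 8192      # [8, 1, 8192, 8192] -> 536,870,912 elements
cp_mask   = 8 * 1 * 2048 * 8192      # [8, 1, 2048, 8192] -> 134,217,728 elements

# At 1 byte per element that is 512 MiB vs 128 MiB per rank.
print(full_mask / 2 ** 20, "MiB")    # 512.0
print(cp_mask / 2 ** 20, "MiB")      # 128.0
```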
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:28:45] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 78.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
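Lines of the form "batch tensor: <name> torch.Size([...])" and "batch tensor after cp: ..." are emitted by every rank for each step. A hypothetical helper of the following form (not the actual script's code) would produce output in exactly this format.

```python
def dump_batch(batch: dict, tag: str = "batch tensor") -> None:
    # Print each tensor's name and shape, matching the log lines above.
    for name, tensor in batch.items():
        print(f"{tag}: {name} {tensor.shape}")

# Usage sketch:
#   dump_batch(batch)
#   dump_batch(cp_batch, "batch tensor after cp")
```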
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:28:45] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 85.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
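With a global batch size of 1, the elapsed-time-per-iteration figures logged so far translate directly into samples per second; the conversion below is just arithmetic on the values already printed above.

```python
# Elapsed time per iteration (ms) as logged for iterations 2-8.
elapsed_ms = [104.2, 79.0, 96.9, 79.7, 80.4, 78.9, 85.4]
global_batch_size = 1

for it, ms in enumerate(elapsed_ms, start=2):
    print(f"iteration {it}: {global_batch_size / (ms / 1000):.1f} samples/s")
```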
torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: batch tensor:tokens tokens torch.Size([8, 8192]) +torch.Size([8, 8192])batch tensor: + labels batch tensor:torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +labelsbatch tensor: torch.Size([8, 8192])loss_mask + batch tensor:torch.Size([8, 8192]) +loss_mask batch tensor:torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +attention_maskbatch tensor: torch.Size([8, 1, 8192, 8192])attention_mask + batch tensor:torch.Size([8, 1, 8192, 8192]) +position_ids batch tensor:torch.Size([8, 8192]) +position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp:batch tensor: tokens torch.Size([8, 2048])tokens + batch tensor after cp: labels torch.Size([8, 2048]) +torch.Size([8, 8192])batch tensor after cp: +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) + loss_maskbatch tensor: torch.Size([8, 2048])labels +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) + batch tensor after cp:torch.Size([8, 8192]) +attention_mask batch tensor: torch.Size([8, 1, 2048, 8192])loss_mask + torch.Size([8, 8192])batch tensor after cp: + position_idsbatch tensor: torch.Size([8, 2048])attention_mask + torch.Size([8, 1, 8192, 8192]) +batch tensor:batch tensor: position_ids torch.Size([8, 8192]) + tokens torch.Size([8, 8192]) +batch tensor: position_ids batch tensor:torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) + tokens torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: batch tensor after cp:tokens tokens torch.Size([8, 2048]) +torch.Size([8, 2048])batch tensor after cp: +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) + labelsbatch tensor after cp: labelstorch.Size([8, 2048]) +torch.Size([8, 2048])batch tensor after cp: +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) + loss_maskbatch tensor after cp: torch.Size([8, 2048])loss_mask + batch tensor after cp:torch.Size([8, 2048]) +attention_mask batch tensor after cp: torch.Size([8, 1, 2048, 8192])attention_mask + batch tensor after cp:torch.Size([8, 1, 2048, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +position_idsbatch tensor after cp: torch.Size([8, 2048])position_ids + torch.Size([8, 2048]) +batch 
tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: tokensbatch tensor: torch.Size([8, 2048]) + batch tensor after cp:tokens labels torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: torch.Size([8, 8192])loss_mask + torch.Size([8, 2048]) +batch tensor: batch tensor after cp:labels attention_masktorch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +torch.Size([8, 1, 2048, 8192])batch tensor: + batch tensor after cp:loss_mask batch tensor after cp:position_idstorch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +tokens batch tensor:torch.Size([8, 2048]) torch.Size([8, 2048]) +attention_mask + batch tensor after cp:torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +labels batch tensor:torch.Size([8, 2048]) +position_idsbatch tensor after cp: torch.Size([8, 8192])loss_mask + torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192]) +batch tensor after cp: tokens batch tensor after cp: tokenstorch.Size([8, 
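The "batch tensor after cp" shapes above follow directly from this run's context-parallel setup: with CP_SIZE=4, each rank keeps seq_len / CP_SIZE = 8192 / 4 = 2048 tokens of every per-sample tensor, while the attention mask is sliced only along the query dimension, giving [8, 1, 2048, 8192]. Below is a minimal sketch of that slicing; split_batch_for_cp is a hypothetical helper and the contiguous split is an assumption for illustration (recent Megatron versions do this in get_batch_on_this_cp_rank with a load-balanced two-chunk split), so this shows the shape arithmetic rather than the exact logged code path.

# Illustrative only: contiguous context-parallel split (assumed), not Megatron's
# exact load-balanced implementation.
import torch

def split_batch_for_cp(batch: dict, cp_rank: int, cp_size: int) -> dict:
    """Keep this CP rank's slice of the sequence dimension for each tensor."""
    out = {}
    for key, t in batch.items():
        seq_len = t.shape[2] if key == "attention_mask" else t.shape[-1]
        chunk = seq_len // cp_size               # 8192 // 4 == 2048
        start, end = cp_rank * chunk, (cp_rank + 1) * chunk
        if key == "attention_mask":
            out[key] = t[:, :, start:end, :]     # [8, 1, 2048, 8192]
        else:
            out[key] = t[:, start:end]           # [8, 2048]
    return out

batch = {
    "tokens": torch.zeros(8, 8192, dtype=torch.long),
    "labels": torch.zeros(8, 8192, dtype=torch.long),
    "loss_mask": torch.ones(8, 8192),
    # full [8, 1, 8192, 8192] bool mask, ~0.5 GiB, as printed in the log
    "attention_mask": torch.ones(8, 1, 8192, 8192, dtype=torch.bool),
    "position_ids": torch.arange(8192).repeat(8, 1),
}
for k, v in split_batch_for_cp(batch, cp_rank=0, cp_size=4).items():
    print("batch tensor after cp:", k, tuple(v.shape))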
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:28:45] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 78.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
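The loss-scale column in the iteration lines above explains the skipped iterations: with fp16 parameters and dynamic loss scaling, a gradient overflow skips the optimizer step and halves the scale, which is exactly the 33554432.0 (iteration 8) to 16777216.0 (iteration 9) progression, continuing to 8388608.0 at iteration 10 below. A minimal sketch of that backoff/growth rule follows; the class name and constants here are assumptions for illustration (Megatron's own DynamicGradScaler uses configurable backoff/growth factors and a hysteresis counter).

# Minimal dynamic loss-scale backoff/growth sketch (assumed policy constants;
# not Megatron's exact grad scaler).
class ToyDynamicLossScaler:
    def __init__(self, init_scale=2.0**25, growth_interval=1000):
        self.scale = init_scale          # 33554432.0, as printed at iteration 8
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Return True if the optimizer step should be skipped."""
        if found_overflow:
            self.scale /= 2.0            # 33554432 -> 16777216 -> 8388608 ...
            self._good_steps = 0
            return True                  # this iteration counts as skipped
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= 2.0            # slowly grow back after clean steps
        return False

scaler = ToyDynamicLossScaler()
for it, overflow in enumerate((True, True, True), start=8):  # iterations 8-10
    print(f"iteration {it}: loss scale = {scaler.scale}")
    scaler.update(overflow)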
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:28:45] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 78.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:28:45
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.028507232666015625 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.02861642837524414 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.02860879898071289 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.02866053581237793 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.028672456741333008 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.028911828994750977 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.02893996238708496 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes
0.029036998748779297 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.0292050838470459 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.02989816665649414 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.02984905242919922 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.029946327209472656 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.030991554260253906 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.29493117332458496 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.30750274658203125 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.30890512466430664 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1885967254638672 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1814382076263428 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1904070377349854 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1807787418365479 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1882898807525635 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1815814971923828 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1876657009124756 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1819207668304443 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1925685405731201 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.182309627532959 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1903512477874756 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.182236671447754 
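The distribute_shards_to_ranks lines above show the fully parallel save splitting the checkpoint shards across the 4 ranks of each save group into nearly equal byte totals (roughly 206-214 MB per rank). One simple way to get that kind of balance is a greedy largest-first assignment to the currently least-loaded rank; the sketch below illustrates that heuristic under the assumption that byte-balanced assignment is the goal, and is not a claim about the exact algorithm in megatron.core.dist_checkpointing.exchange_utils.

# Greedy "largest shard to least-loaded rank" balancing sketch (illustrative).
import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    """Return {rank: [shard indices]} with roughly equal byte totals per rank."""
    heap = [(0, rank) for rank in range(num_ranks)]   # (bytes assigned, rank)
    heapq.heapify(heap)
    assignment = {rank: [] for rank in range(num_ranks)}
    # Assign the biggest shards first so per-rank totals stay close together.
    for idx in sorted(range(len(shard_sizes)), key=lambda i: -shard_sizes[i]):
        load, rank = heapq.heappop(heap)
        assignment[rank].append(idx)
        heapq.heappush(heap, (load + shard_sizes[idx], rank))
    return assignment

sizes = [50, 40, 35, 30, 25, 20, 10, 5]               # toy shard sizes in MB
buckets = distribute_shards_to_ranks(sizes, num_ranks=4)
for rank, idxs in buckets.items():
    print(f"rank {rank}: {sum(sizes[i] for i in idxs)} MB, shards {idxs}")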
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1901624202728271 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1822314262390137 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.010883331298828125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4436938762664795 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.002597808837890625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5273733 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.30495572090148926 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.3034844398498535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.939338684082031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.528128 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.06100726127624512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5275881 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.30545830726623535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.7738037109375e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5276968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.528156 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.869171142578125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.3045542240142822 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.963180541992188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.05018043518066406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.528241 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.07755184173583984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.989738464355469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5282981 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.3018629550933838 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.3048088550567627 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5283167 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5282972 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5282958 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.224082946777344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.3059103488922119 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011181831359863281 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.131431579589844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.463859558105469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.72747802734375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.528437 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.3023111820220947 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.678436279296875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5284877 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.30519938468933105 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.3018927574157715 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5285716 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.699562072753906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.528547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011324882507324219 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.534027099609375e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.3058803081512451 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.30663442611694336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5288708 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541327.5321662 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00016641616821289062 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1975250244140625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04077625274658203 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5695198 rank: 13, write(async) time: 0.041201114654541016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04231142997741699 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04236412048339844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.570921 rank: 6, write(async) time: 0.04279160499572754 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5709765 rank: 7, write(async) time: 0.042821407318115234 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04207873344421387 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04278063774108887 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5711265 rank: 11, write(async) time: 0.04255342483520508 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5714388 rank: 4, write(async) time: 0.04319953918457031 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.043105125427246094 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04276323318481445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.572047 rank: 1, write(async) time: 0.04355645179748535 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5716448 rank: 10, write(async) time: 0.04320883750915527 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04404878616333008 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.044172048568725586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5728312 rank: 3, write(async) time: 0.04453301429748535 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5721838 rank: 15, write(async) time: 0.044593095779418945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04419970512390137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5731676 rank: 2, write(async) time: 0.04462170600891113 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04373431205749512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.045037031173706055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5731468 rank: 8, write(async) time: 0.045452117919921875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5731509 rank: 9, write(async) 
time: 0.04428720474243164 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04408574104309082 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5766733 rank: 0, write(async) time: 0.04450488090515137 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.048711299896240234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.576493 rank: 14, write(async) time: 0.04911923408508301 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051802873611450195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.5805871 rank: 12, write(async) time: 0.05228614807128906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05629420280456543 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541327.585081 rank: 5, write(async) time: 0.056783199310302734 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.02059626579284668 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.020413875579833984 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.021216869354248047 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03300213813781738 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.02330160140991211 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.024167537689208984 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.025695085525512695 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 2.2411346435546875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.5497207641601562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.4066696166992188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.020854711532592773 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.02236175537109375 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.02908158302307129 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.022528648376464844 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.022376537322998047 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.028257369995117188 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.02507495880126953 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.024898052215576172 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03248786926269531 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1712672768, after: 1712807936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 262144, before: 1702514688, after: 1702776832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 229376, before: 1703276544, after: 1703505920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
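The sequence above ("finish D2H", "schedule async ckpt", "joining self.process", worker "0 started"/"1 started", "collecting worker results") is the async save path: each rank first copies its shard tensors device-to-host, then hands the actual file writes to a separate process (with thread_count: 2 writer workers), and later joins that process and collects the results. The sketch below is a stripped-down, hypothetical version of that pattern using multiprocessing; the real FileSystemWriterAsync/TemporalAsyncCaller add plan/bucket handling and error propagation that are omitted here.

# Minimal async-checkpoint sketch: D2H copy, then write in a separate process.
# Illustrative only; not the actual FileSystemWriterAsync implementation.
import multiprocessing as mp
import time
import torch

def _write_worker(cpu_state, path):
    torch.save(cpu_state, path)                       # runs in the child process

def schedule_async_save(state_dict, path):
    t0 = time.time()
    cpu_state = {k: v.detach().cpu() for k, v in state_dict.items()}  # D2H
    print(f"finish D2H in {time.time() - t0:.4f}s")
    proc = mp.Process(target=_write_worker, args=(cpu_state, path))
    proc.start()                                      # "schedule async ckpt"
    return proc

if __name__ == "__main__":
    state = {"weight": torch.randn(1024, 1024)}
    p = schedule_async_save(state, "ckpt_shard.pt")
    # ... training could continue here while the write is in flight ...
    p.join()                                          # "joining self.process"
    print("async save finished, exit code:", p.exitcode)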
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 11902976, before: 2019164160, after: 2031067136 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106057728, before: 1706979328, after: 1813037056 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114216960, before: 1714835456, after: 1829052416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114331648, before: 1720557568, after: 1834889216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105906176, before: 1709604864, after: 1815511040 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110133248, before: 1730764800, after: 1840898048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105947136, before: 1707958272, after: 1813905408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110022656, before: 1714835456, after: 1824858112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109441024, before: 1709604864, after: 1819045888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110112768, before: 1706901504, after: 1817014272 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114421760, before: 1730764800, after: 1845186560 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 113995776, before: 1761800192, after: 1875795968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114069504, before: 1714315264, after: 1828384768 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.0776384, rank: 11, write(sync,parallel): 0.33428263664245605 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.0825217, rank: 15, write(sync,parallel): 0.34415769577026367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109047808, before: 1709789184, after: 1818836992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.0925424, rank: 10, write(sync,parallel): 0.35059595108032227 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.094242, rank: 13, write(sync,parallel): 0.3622133731842041 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109846528, before: 1708023808, after: 1817870336 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109113344, before: 1725456384, after: 1834569728 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113995776, before: 1761800192, after: 1875795968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108888064, before: 1711869952, after: 1820758016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108781568, before: 1732227072, after: 1841008640 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114360320, before: 1725456384, after: 1839816704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.1335645, rank: 14, write(sync,parallel): 0.39024996757507324 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109957120, before: 1720557568, after: 1830514688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.1488752, rank: 8, write(sync,parallel): 0.4019768238067627 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110231552, before: 1714315264, after: 1824546816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.1548934, rank: 7, write(sync,parallel): 0.4799461364746094 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.45s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114200576, 
before: 1711869952, after: 1826070528 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.53s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.1825757, rank: 9, write(sync,parallel): 0.43512415885925293 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114364416, before: 1709789184, after: 1824153600 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.1908102, rank: 12, write(sync,parallel): 0.43964290618896484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.2122135, rank: 6, write(sync,parallel): 0.538017749786377 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.50s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.216916, rank: 5, write(sync,parallel): 0.5245983600616455 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 118394880, before: 1732227072, after: 1850621952 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.59s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.60s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.2693512, rank: 4, write(sync,parallel): 0.5880081653594971 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.65s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212271104, before: 1712672768, after: 1924943872 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.4420788, rank: 1, write(sync,parallel): 0.7335734367370605 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212312064, before: 1702514688, after: 1914826752 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.79s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212164608, before: 1703276544, after: 1915441152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.4968545, rank: 3, write(sync,parallel): 0.7817113399505615 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.85s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.5319996, rank: 2, write(sync,parallel): 0.8175501823425293 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212262912, before: 2019164160, after: 2231427072 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.88s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541328.5949576, rank: 0, write(sync,parallel): 0.8535680770874023 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.92s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6323292, 7, gather: 0.4498171806335449 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6324117, 6, gather: 0.3947267532348633 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6324959, 5, gather: 0.3776540756225586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6325986, 2, gather: 0.06634831428527832 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6325834, 4, gather: 0.32749342918395996 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6325967, 3, gather: 0.10008645057678223 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6328444, 1, gather: 0.1637117862701416 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6328645, 12, gather: 0.4054234027862549 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6328585, 11, gather: 0.5280086994171143 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6328814, 13, gather: 0.5129551887512207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6328685, 10, gather: 0.5133767127990723 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6329904, 14, gather: 0.4689674377441406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6329093, 9, gather: 0.4174964427947998 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6330547, 8, gather: 0.4570600986480713 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6331596, 15, gather: 0.5231156349182129 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.6369045, 0, gather: 0.0067822933197021484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541328.654485, metadata_write: 0.01745128631591797 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0267s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1873s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0904s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1243s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4740s 
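After the per-rank writes finish, the results are gathered (the "gather:" timings above) and rank 0 alone writes the checkpoint metadata ("metadata_write: 0.017..."), after which every rank runs its finalize step. The sketch below shows that coordinator pattern with torch.distributed under simplifying assumptions: finalize_save and the JSON metadata format are hypothetical, and the real state_dict_saver gathers write results and global metadata in the torch.distributed.checkpoint binary format rather than durations in JSON.

# Sketch: gather per-rank write results to rank 0, which writes the metadata.
# Assumes torch.distributed is already initialized, as in the training run.
import json
import torch.distributed as dist

def finalize_save(local_result, metadata_path="gpt-checkpoint/.metadata.json"):
    world_size = dist.get_world_size()
    gathered = [None] * world_size if dist.get_rank() == 0 else None
    dist.gather_object(local_result, gathered, dst=0)   # the "gather" step
    if dist.get_rank() == 0:
        with open(metadata_path, "w") as f:             # the "metadata_write" step
            json.dump(gathered, f)
    dist.barrier()                                       # all ranks finalize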
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4016s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5367s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5363s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4802s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4292s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5514s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5463s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4409s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4187s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3519s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4921s
+ successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ]
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0024263858795166016 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002461671829223633 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0024499893188476562 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.002376556396484375 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002468109130859375 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0024051666259765625 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.002402782440185547 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.0023920536041259766 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0024499893188476562 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.0024824142456054688 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0024268627166748047 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.0024530887603759766 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.002398967742919922 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.002452373504638672 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002401590347290039 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.002310037612915039 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+ evaluate .......................................: (2885.14, 2887.12)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.276657E+01 | lm loss PPL: 3.503091E+05 |
+----------------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([8, 2048])
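The shape change logged above, from torch.Size([8, 8192]) down to torch.Size([8, 2048]), is the context-parallel split: with CP_SIZE=4, each context-parallel rank keeps seq_len / cp_size = 8192 / 4 = 2048 positions of tokens, labels, loss_mask and position_ids, while the attention mask keeps its full key dimension ([8, 1, 2048, 8192]). The sketch below only illustrates that slicing; the function name is ours, and Megatron-LM's own CP batch slicing (get_batch_on_this_cp_rank, if we recall the source correctly) additionally interleaves two chunks per rank to balance causal-attention work, which is omitted here.

import torch

def split_batch_for_cp_rank(batch, cp_rank, cp_size):
    """Keep this context-parallel rank's contiguous slice of the sequence dim.

    Simplified sketch only: the real implementation assigns two interleaved
    chunks per rank for load balancing; here each rank takes one block.
    """
    out = {}
    for key, tensor in batch.items():
        if key == 'attention_mask':
            # [b, 1, seq, seq]: slice the query dimension, keep all keys
            chunk = tensor.shape[2] // cp_size
            out[key] = tensor[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            # [b, seq]: slice the sequence dimension
            chunk = tensor.shape[1] // cp_size
            out[key] = tensor[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

# Shapes as in the log (the bool mask alone is ~0.5 GB; shrink seq_len to test cheaply)
batch = {
    'tokens': torch.zeros(8, 8192, dtype=torch.long),
    'labels': torch.zeros(8, 8192, dtype=torch.long),
    'loss_mask': torch.ones(8, 8192),
    'attention_mask': torch.ones(8, 1, 8192, 8192, dtype=torch.bool),
    'position_ids': torch.arange(8192).repeat(8, 1),
}
local = split_batch_for_cp_rank(batch, cp_rank=0, cp_size=4)
print(local['tokens'].shape)          # torch.Size([8, 2048])
print(local['attention_mask'].shape)  # torch.Size([8, 1, 2048, 8192])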
+Start exporting trace 11
+Done exporting trace 11
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+ evaluate .......................................: (42.48, 42.82)
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+----------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on test set | lm loss value: 1.276657E+01 | lm loss PPL: 3.503091E+05 |
+---------------------------------------------------------------------------------------------------------- +Running ctx_length=2048, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. 
None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... 
False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... 
[] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... 
False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... 
False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ 
False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. 
True + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... 
False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +INFO:megatron.training.initialize:Setting logging level to 0 +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... 
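The "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above follows from padding the GPT-2 vocabulary so it divides evenly across the 4 tensor-parallel ranks. Assuming the usual Megatron-LM rule of rounding up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size = 128 * 4 = 512, the arithmetic works out as in this small sketch (the helper name is ours):

import math

def padded_vocab_size(orig_vocab_size, divisible_by=128, tp_size=4):
    # Round the vocabulary up so each tensor-parallel shard gets an equal slice.
    multiple = divisible_by * tp_size
    return math.ceil(orig_vocab_size / multiple) * multiple

padded = padded_vocab_size(50257)
print(padded, padded - 50257)  # 50688 431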
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.044 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.474 seconds +time to initialize megatron (seconds): 8.962 +[after megatron is initialized] datetime: 2025-06-21 21:29:30 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (148442112 elements, 148442112 padded size): + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + 
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.embedding.position_embeddings.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (118.57, 119.11)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:29:31
+> building train, validation, and test datasets ...
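The per-rank parameter count of 148442112 printed during model build is consistent with the arguments dumped above (hidden_size 4096, ffn_hidden_size 16384, num_layers 2, num_query_groups 16, kv_channels 64, max_position_embeddings 2048, padded vocab 50688, TP size 4, tied output embeddings). Below is a back-of-the-envelope sketch assuming Megatron-style sharding (column/row-parallel linear weights split over TP; layernorms, row-parallel biases and position embeddings replicated); the helper is ours, not Megatron's API:

def params_per_tp_rank(hidden=4096, ffn=16384, layers=2, groups=16,
                       kv_channels=64, vocab_padded=50688, max_pos=2048, tp=4):
    qkv_out = hidden + 2 * groups * kv_channels          # 4096 + 2048 = 6144 (GQA)
    per_layer = (
        2 * hidden                                       # input layernorm weight + bias
        + qkv_out * hidden // tp + qkv_out // tp         # QKV weight + bias (column-parallel)
        + hidden * hidden // tp + hidden                 # attention output proj + replicated bias
        + 2 * hidden                                     # pre-MLP layernorm weight + bias
        + hidden * ffn // tp + ffn // tp                 # MLP fc1 weight + bias
        + ffn * hidden // tp + hidden                    # MLP fc2 weight + replicated bias
    )
    embeddings = vocab_padded * hidden // tp + max_pos * hidden  # word (sharded) + position (replicated)
    return layers * per_layer + embeddings + 2 * hidden          # plus final layernorm

print(params_per_tp_rank())  # 148442112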
+ > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=2048, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005699 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33296 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002607 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33281 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002598 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33343 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:29:31 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (632.13, 639.90) + train/valid/test-data-iterators-setup ..........: (18.31, 143.81) +training ... +Setting rerun_state_machine.current_iteration to 0... 
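The split_matrix logged above is simply split='1,1,1' normalized into cumulative [start, end) fractions over the mock dataset, i.e. an even train/valid/test split of one third each. An illustrative recomputation (not the code in megatron.core.datasets):

    def split_matrix(split: str):
        # Normalize comma-separated weights into cumulative fraction bounds.
        weights = [float(w) for w in split.split(",")]
        total = sum(weights)
        bounds, acc = [], 0.0
        for i, w in enumerate(weights):
            start = acc
            acc = 1.0 if i == len(weights) - 1 else acc + w / total
            bounds.append((start, acc))
        return bounds

    print(split_matrix("1,1,1"))
    # [(0.0, 0.3333...), (0.3333..., 0.6666...), (0.6666..., 1.0)]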
+[before the start of training step] datetime: 2025-06-21 21:29:31 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask 
torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels 
torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB + [2025-06-21 21:29:46] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 14928.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 1] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11902.0 | max reserved: 11902.0[Rank 3] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11678.0 | max reserved: 11678.0 + +[Rank 14] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 12028.0 | max reserved: 12028.0[Rank 10] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11768.0 | max reserved: 11768.0 + +[Rank 15] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11772.0 | max reserved: 11772.0 +[Rank 8] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 12024.0 | max reserved: 12024.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11708.0 | max reserved: 11708.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11932.0 | max reserved: 11932.0[Rank 6] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | 
max allocated: 10689.46533203125 | reserved: 11932.0 | max reserved: 11932.0 + +[Rank 0] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11902.0 | max reserved: 11902.0 +[Rank 12] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11900.0 | max reserved: 11900.0 +[Rank 11] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11768.0 | max reserved: 11768.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11932.0 | max reserved: 11932.0 +[Rank 9] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 12024.0 | max reserved: 12024.0[Rank 13] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11900.0 | max reserved: 11900.0 + +[Rank 2] (after 1 iterations) memory (MB) | allocated: 4413.03564453125 | max allocated: 10689.46533203125 | reserved: 11902.0 | max reserved: 11902.0 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp:batch tensor: tokenslabels torch.Size([8, 4096])torch.Size([8, 16384]) + +batch tensor after cp:batch tensor: labelsloss_mask torch.Size([8, 4096])torch.Size([8, 16384]) + +batch tensor after cp:batch tensor: loss_maskattention_mask torch.Size([8, 4096])torch.Size([8, 1, 16384, 16384]) + +batch tensor after cp:batch tensor: attention_maskposition_ids torch.Size([8, 16384])torch.Size([8, 1, 4096, 16384]) + +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch 
tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor:batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokensposition_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) 
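The "batch tensor" / "batch tensor after cp" dumps around this point show what the context-parallel split does to each microbatch: the full 16384-token sequence ([8, 16384]) is divided four ways along the sequence dimension, so each CP rank keeps 16384 / 4 = 4096 query positions ([8, 4096]), while the attention mask keeps the full key length ([8, 1, 4096, 16384]). A simplified sketch of that slicing, assuming a plain contiguous split (Megatron's actual per-CP-rank batch helper typically also reorders sequence chunks to balance causal-attention work across ranks, which this sketch ignores):

    import torch

    def slice_batch_for_cp(batch, cp_rank, cp_size):
        # Sketch: keep 1/cp_size of the sequence dimension on each CP rank.
        out = {}
        for key, t in batch.items():
            if key == "attention_mask":
                # Slice only the query dimension; keys keep the full length,
                # e.g. [8, 1, 16384, 16384] -> [8, 1, 4096, 16384] in this log.
                chunk = t.shape[2] // cp_size
                out[key] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
            else:
                # tokens/labels/loss_mask/position_ids: [8, 16384] -> [8, 4096]
                chunk = t.shape[1] // cp_size
                out[key] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
        return out

    # Small demo (16 tokens standing in for the 16384 in the log, split four ways):
    demo = {
        "tokens": torch.arange(16).repeat(8, 1),
        "attention_mask": torch.ones(8, 1, 16, 16, dtype=torch.bool),
    }
    sliced = slice_batch_for_cp(demo, cp_rank=0, cp_size=4)
    print(sliced["tokens"].shape)          # torch.Size([8, 4])
    print(sliced["attention_mask"].shape)  # torch.Size([8, 1, 4, 16])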
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 1 +Done exporting trace 1 + [2025-06-21 21:29:46] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 229.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: 
position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor:batch tensor: labels torch.Size([8, 16384]) +tokensbatch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 16384])torch.Size([8, 1, 16384, 16384]) + +batch tensor: position_ids batch tensor:torch.Size([8, 16384]) +labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor:batch tensor: loss_mask torch.Size([8, 16384])tokens +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) + batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +torch.Size([8, 16384])batch tensor: + position_ids torch.Size([8, 16384])batch tensor: +batch tensor: position_ids torch.Size([8, 16384]) + labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: batch tensor after cp:attention_mask tokenstorch.Size([8, 1, 4096, 16384]) +batch tensor after cp:batch tensor after cp: tokenstokens torch.Size([8, 4096])torch.Size([8, 4096]) + +batch tensor 
after cp:batch tensor after cp: labelslabels torch.Size([8, 4096])torch.Size([8, 4096]) +batch tensor after cp:torch.Size([8, 4096]) +position_ids batch tensor after cp:torch.Size([8, 4096]) +labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: + loss_maskbatch tensor after cp: torch.Size([8, 4096])loss_mask + torch.Size([8, 4096])batch tensor after cp: +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) + attention_maskbatch tensor after cp: attention_masktorch.Size([8, 1, 4096, 16384]) +torch.Size([8, 1, 4096, 16384])batch tensor after cp: + position_idsbatch tensor after cp: torch.Size([8, 4096])position_ids +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) + torch.Size([8, 4096]) +batch tensor after cp: loss_maskbatch tensor after cp: torch.Size([8, 4096])tokens + batch tensor after cp: torch.Size([8, 4096])attention_mask +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) + batch tensor after cp:torch.Size([8, 1, 4096, 16384]) +labels batch tensor after cp: torch.Size([8, 4096])position_ids + batch tensor after cp:torch.Size([8, 4096]) +loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384])batch tensor: +batch tensor: tokens torch.Size([8, 16384]) + tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch 
tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:29:46] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 201.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch 
tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_maskbatch tensor: torch.Size([8, 16384]) + tokensbatch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor:torch.Size([8, 16384]) +position_ids torch.Size([8, 16384])batch tensor: +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) + labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp:batch tensor after cp: tokenstokens torch.Size([8, 4096])torch.Size([8, 4096]) + +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp:batch tensor after cp: labelslabels torch.Size([8, 4096])torch.Size([8, 4096]) + +batch tensor after cp:batch tensor after cp: loss_maskloss_mask torch.Size([8, 4096])torch.Size([8, 4096]) + +batch tensor after cp:batch tensor after cp: attention_maskattention_mask torch.Size([8, 1, 4096, 16384]) +torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: batch tensor after cp:position_ids position_idstorch.Size([8, 4096]) +torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: batch tensor:loss_mask torch.Size([8, 16384]) + tokensbatch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +torch.Size([8, 16384])batch tensor: +position_ids torch.Size([8, 16384])batch tensor: + labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096])batch tensor after cp: +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) + tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: 
labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:29:46] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 200.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch 
tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor:batch tensor: labels tokenstorch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor:torch.Size([8, 16384]) + attention_mask batch tensor: torch.Size([8, 1, 16384, 16384])labels +torch.Size([8, 16384]) +batch tensor: batch tensor:position_ids loss_masktorch.Size([8, 16384]) +torch.Size([8, 16384]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: 
loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])batch tensor after cp: +batch tensor after cp: position_ids torch.Size([8, 4096]) + batch tensor after cp:tokens position_idstorch.Size([8, 4096]) +torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask batch tensor:torch.Size([8, 16384]) +batch tensor: tokensattention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: torch.Size([8, 16384])position_ids 
+torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:29:47] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 199.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels 
torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) 
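A side note on the attention_mask shapes repeated throughout these dumps: since the dataset config above sets create_attention_mask=True, each rank first materializes a full [8, 1, 16384, 16384] mask and only then slices it for context parallelism. Assuming a 1-byte bool mask (the dtype is an assumption here), that is 2 GiB per microbatch before slicing and still 0.5 GiB afterwards, and the cost grows quadratically with context length:

    # Back-of-envelope mask sizes for the shapes printed above
    # (assumes 1 byte per element, i.e. a bool mask).
    def numel(shape):
        n = 1
        for d in shape:
            n *= d
        return n

    full_mask = numel((8, 1, 16384, 16384))   # before the context-parallel slice
    cp_mask   = numel((8, 1, 4096, 16384))    # after the slice (4 CP ranks)
    print(full_mask / 2**30, "GiB")  # 2.0 GiB
    print(cp_mask / 2**30, "GiB")    # 0.5 GiB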
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:29:47] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 198.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor:
labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor:batch tensor: position_ids torch.Size([8, 16384])tokens + torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor:batch tensor: tokens tokens torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor:batch tensor: labelslabels torch.Size([8, 16384])torch.Size([8, 16384]) + +batch 
tensor: position_ids torch.Size([8, 16384]) +batch tensor:batch tensor: loss_maskloss_mask torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor:batch tensor: attention_maskattention_mask torch.Size([8, 1, 16384, 16384])torch.Size([8, 1, 16384, 16384]) + +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor:batch tensor: position_idsposition_ids torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokensbatch tensor after cp: torch.Size([8, 4096])tokens + batch tensor after cp:torch.Size([8, 4096]) +labels batch tensor after cp:torch.Size([8, 4096]) +batch tensor after cp: attention_maskbatch tensor after cp: torch.Size([8, 1, 4096, 16384])tokens + batch tensor after cp:torch.Size([8, 4096]) +labelsbatch tensor after cp: torch.Size([8, 4096])loss_mask + batch tensor after cp:torch.Size([8, 4096]) +position_ids batch tensor after cp:torch.Size([8, 4096]) +labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +loss_mask batch tensor after cp: torch.Size([8, 4096])attention_mask + batch tensor after cp:torch.Size([8, 1, 4096, 16384]) +attention_mask batch tensor after cp: torch.Size([8, 1, 4096, 16384])position_ids + batch tensor after cp:torch.Size([8, 4096]) +position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids batch tensor:torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) + tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: 
tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:29:47] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 198.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask 
torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor:batch tensor: tokenstokens torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor:batch tensor: labelslabels torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor:batch tensor: loss_maskloss_mask torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: batch tensor:attention_mask attention_mask torch.Size([8, 1, 16384, 16384]) +torch.Size([8, 1, 16384, 16384]) +batch tensor: batch tensor:position_ids position_idstorch.Size([8, 16384]) +torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens batch tensor after cp:torch.Size([8, 4096]) +tokens batch tensor after cp: labelstorch.Size([8, 4096]) +torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: batch tensor after cp:labels loss_masktorch.Size([8, 4096]) +torch.Size([8, 4096])batch tensor after cp: +batch tensor: labels torch.Size([8, 16384]) + batch tensor after cp:loss_mask attention_masktorch.Size([8, 4096]) +batch tensor after cp:torch.Size([8, 1, 4096, 16384]) +attention_maskbatch tensor after cp: position_idstorch.Size([8, 1, 4096, 16384]) +torch.Size([8, 4096])batch tensor after cp: + position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after 
cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:29:47] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 195.9 
| learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: batch tensor:attention_mask torch.Size([8, 1, 4096, 16384]) +tokensbatch tensor after cp: position_ids torch.Size([8, 4096]) +torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 
16384]) +batch tensor after cp:batch tensor: tokens torch.Size([8, 4096]) +tokensbatch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 16384])torch.Size([8, 4096]) + +batch tensor after cp: batch tensor:attention_mask labels torch.Size([8, 1, 4096, 16384]) +torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: batch tensor:position_ids torch.Size([8, 4096])loss_mask + torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: batch tensor:loss_mask torch.Size([8, 4096]) +tokensbatch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: torch.Size([8, 16384])position_ids +batch tensor: labels torch.Size([8, 4096])torch.Size([8, 16384]) + +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask batch tensor after cp:torch.Size([8, 1, 16384, 16384]) +tokens batch tensor: position_idstorch.Size([8, 4096]) +torch.Size([8, 16384])batch tensor after cp: +batch tensor: tokens torch.Size([8, 16384]) + labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor 
after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:29:47] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 197.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 
1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor:batch tensor after cp: tokenstokens torch.Size([8, 4096]) +batch tensor after cp:torch.Size([8, 16384]) labels + torch.Size([8, 4096])batch tensor: + batch tensor after cp:labels loss_masktorch.Size([8, 16384]) +torch.Size([8, 4096])batch tensor: + loss_maskbatch tensor after cp: attention_masktorch.Size([8, 16384]) +torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor:batch tensor after cp: attention_maskposition_ids torch.Size([8, 4096])torch.Size([8, 1, 16384, 16384]) + +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor:batch tensor: tokens tokens torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor:batch tensor: labelslabels torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor:batch tensor: loss_maskloss_mask torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor:batch tensor: attention_maskattention_mask torch.Size([8, 1, 16384, 16384])torch.Size([8, 1, 16384, 16384]) + +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor:batch tensor: position_idsposition_ids torch.Size([8, 16384])torch.Size([8, 16384]) + +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch 
tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp:batch tensor after cp: tokenstokens torch.Size([8, 4096])torch.Size([8, 4096]) + +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp:batch tensor after cp: labelslabels torch.Size([8, 4096]) +torch.Size([8, 4096])batch tensor after cp: + batch tensor after cp:loss_mask loss_masktorch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +torch.Size([8, 4096])batch tensor after cp: + attention_maskbatch tensor after cp: attention_masktorch.Size([8, 1, 4096, 16384]) +torch.Size([8, 1, 4096, 16384])batch tensor after cp: + position_idsbatch tensor after cp: torch.Size([8, 4096])position_ids + torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch 
tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:29:48] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 198.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:29:48 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.02829575538635254 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02834939956665039 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.028365135192871094 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.02838301658630371 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.02840256690979004 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.028412580490112305 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.02917790412902832 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.031248807907104492 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.03349781036376953 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03349781036376953 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.033489227294921875 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.03354763984680176 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03354239463806152 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.03371167182922363 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.033998966217041016 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.03407883644104004 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save 
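Note on the shapes above: with CP_SIZE=4, each rank keeps 4096 of the 16384 tokens per sequence ("batch tensor after cp"), while the attention mask is sliced only along the query dimension and keeps the full 16384-wide key dimension ([8, 1, 4096, 16384]). The sketch below illustrates that kind of per-rank slicing with a plain contiguous split; it is illustrative only and not the exact Megatron helper, which assigns each context-parallel rank non-adjacent chunks to balance causal-attention work.

    # Illustrative only: contiguous context-parallel slicing of one micro-batch.
    # The real Megatron code path uses a load-balanced chunk assignment; this
    # sketch just takes a single contiguous slice per cp_rank.
    import torch

    def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
        seq_len = batch["tokens"].size(1)        # 16384 in the run above
        chunk = seq_len // cp_size               # 4096 with CP_SIZE=4
        start, stop = cp_rank * chunk, (cp_rank + 1) * chunk
        out = {}
        for name, t in batch.items():
            if name == "attention_mask":
                # query dim is sliced, key dim stays full: [b, 1, 4096, 16384]
                out[name] = t[:, :, start:stop, :]
            else:
                # tokens / labels / loss_mask / position_ids: [b, 4096]
                out[name] = t[:, start:stop]
        return out

    if __name__ == "__main__":
        b, s, cp = 2, 32, 4                      # the run above used b=8, s=16384
        batch = {
            "tokens": torch.zeros(b, s, dtype=torch.long),
            "labels": torch.zeros(b, s, dtype=torch.long),
            "loss_mask": torch.ones(b, s),
            "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
            "position_ids": torch.arange(s).repeat(b, 1),
        }
        for k, v in slice_batch_for_cp_rank(batch, cp_size=cp, cp_rank=0).items():
            print("batch tensor after cp:", k, tuple(v.shape))

Scaled up to b=8 and s=16384, this prints the same per-rank shapes as the "batch tensor after cp" lines in the log.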
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.02829575538635254 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02834939956665039 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.028365135192871094 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.02838301658630371 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.02840256690979004 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.028412580490112305 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.02917790412902832 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.031248807907104492 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.03349781036376953 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03349781036376953 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.033489227294921875 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.03354763984680176 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03354239463806152 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.03371167182922363 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.033998966217041016 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.03407883644104004 to prepare state dict for ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(220612608), 0), (np.int64(221249536), 1), (np.int64(226492416), 2), (np.int64(222298112), 3)]
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.183610200881958
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1825296878814697
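Note on the distribute_shards_to_ranks lines above: the (size, rank) pairs show each of the 4 ranks in a save group ending up with roughly 206-227 MB of checkpoint shards, i.e. the fully parallel save spreads bytes nearly evenly before any writing starts. The snippet below is a hedged illustration of one way to reach such a balanced assignment (greedy, largest shard first); it is an assumption made for clarity, not Megatron's actual implementation.

    # Illustrative greedy shard balancing: give each shard (largest first) to the
    # rank that currently holds the fewest bytes. This mimics the kind of
    # (size, rank) assignment printed above but is a simplified stand-in.
    import heapq

    def distribute_shards(shard_sizes, num_ranks):
        heap = [(0, rank) for rank in range(num_ranks)]   # (bytes_assigned, rank)
        heapq.heapify(heap)
        assignment = []
        for size in sorted(shard_sizes, reverse=True):
            load, rank = heapq.heappop(heap)
            assignment.append((size, rank))
            heapq.heappush(heap, (load + size, rank))
        return assignment

    if __name__ == "__main__":
        # Made-up shard sizes in bytes, just to show the balancing behaviour.
        shards = [90_000_000, 80_000_000, 70_000_000, 65_000_000,
                  60_000_000, 55_000_000, 50_000_000, 45_000_000]
        for size, rank in distribute_shards(shards, num_ranks=4):
            print(f"shard of {size} bytes -> rank {rank}")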
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1831228733062744 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1770577430725098 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1835672855377197 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1770102977752686 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.177154541015625 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1772630214691162 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1829156875610352 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1774113178253174 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1774580478668213 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.199512243270874 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1851389408111572 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1782793998718262 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.012725353240966797 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1855823993682861 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.007322788238525391 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.007762908935546875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.004144430160522461 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.00726771354675293 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.005079746246337891 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.004984855651855469 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.006655216217041016 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.007507801055908203 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.463772 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4637733 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4637775 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4637785 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4637785 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.0076563358306884766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.463781 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4634504 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.0078084468841552734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.005692005157470703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.221366882324219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.459785461425781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1975250244140625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.984306335449219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.699562072753906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.463459 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4638686 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4634728 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.723403930664062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.007799863815307617 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.0074462890625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4634833 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.006166934967041016 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.005557537078857422 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.463509 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4635093 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.463515 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.4635208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.104873657226562e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.532669067382812e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.556510925292969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.413459777832031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.389617919921875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.318092346191406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.367134094238281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.700920104980469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.006085395812988281 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541389.467447 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.036064147949219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04099464416503906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04112553596496582 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5051906 rank: 6, write(async) time: 0.04140925407409668 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5052884 rank: 5, write(async) time: 0.04151630401611328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.042519569396972656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5063672 rank: 15, write(async) time: 0.04291486740112305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04303312301635742 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.043138742446899414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5068967 rank: 11, write(async) time: 0.04343557357788086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5073166 rank: 2, write(async) time: 0.043535709381103516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04328417778015137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.507164 rank: 10, write(async) time: 0.04367876052856445 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04510664939880371 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5093315 rank: 3, write(async) time: 0.04555058479309082 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 
0.045250654220581055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.509176 rank: 12, write(async) time: 0.04566812515258789 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04546403884887695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.045467376708984375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.509332 rank: 8, write(async) time: 0.04586005210876465 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5097098 rank: 7, write(async) time: 0.04593539237976074 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05039024353027344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5146036 rank: 1, write(async) time: 0.05082511901855469 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04723310470581055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5150945 rank: 0, write(async) time: 0.04764819145202637 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05096435546875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5149179 rank: 9, write(async) time: 0.051406145095825195 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051369667053222656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.515663 rank: 4, write(async) time: 0.05179929733276367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05181455612182617 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5157416 rank: 13, write(async) time: 0.05222177505493164 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05204176902770996 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541389.5159733 rank: 14, write(async) time: 0.0524592399597168 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.5020370483398438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.2874603271484375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.5497207641601562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.02134561538696289 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.6450881958007812e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.288818359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.1696090698242188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.02252984046936035 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.0226900577545166 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.288818359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.02884840965270996 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.033165931701660156 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03327441215515137 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.024422645568847656 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.025647640228271484 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.02785181999206543 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.02484869956970215 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.030237436294555664 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.025798797607421875 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.02449202537536621 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.025942087173461914 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.033489227294921875 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
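The "finish D2H" and "schedule async ckpt" timings above trace the two cheap steps the training ranks pay for an asynchronous save: each GPU tensor is first copied device-to-host, and only then is the actual file write handed off to a background writer. The snippet below is a minimal sketch of that staging step, assuming a flat dict of tensors; it is illustrative only and is not Megatron's FileSystemWriterAsync implementation (the helper name `stage_state_dict_to_host` is made up for this example).

```python
import torch

def stage_state_dict_to_host(state_dict):
    """Hypothetical helper: copy all CUDA tensors to pinned host memory (the 'D2H' step),
    so a background writer can serialize them without blocking the GPU."""
    staged = {}
    for name, value in state_dict.items():
        if isinstance(value, torch.Tensor) and value.is_cuda:
            host = torch.empty(value.shape, dtype=value.dtype, device="cpu", pin_memory=True)
            host.copy_(value, non_blocking=True)  # asynchronous device-to-host copy
            staged[name] = host
        else:
            staged[name] = value
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # ensure every D2H copy has landed before handing off
    return staged
```

With the copies staged on the host, the only remaining cost on the training process is scheduling the writer, which is consistent with the "schedule async ckpt" lines reporting a few tens of milliseconds while the writes themselves take longer.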
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.03139686584472656 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 196608, before: 1757933568, after: 1758130176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 200704, before: 1721786368, after: 1721987072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 282624, before: 1742733312, after: 1743015936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 17788928, before: 2019377152, after: 2037166080 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106061824, before: 1743278080, after: 1849339904 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108949504, before: 1721278464, after: 1830227968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110129152, before: 1743278080, after: 1853407232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109072384, before: 1734701056, after: 1843773440 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108699648, before: 1742114816, after: 1850814464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114356224, before: 1721278464, after: 1835634688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106000384, before: 1717841920, after: 1823842304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114393088, before: 1772232704, after: 1886625792 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9186554, rank: 15, write(sync,parallel): 0.3033483028411865 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 117190656, before: 1729982464, after: 1847173120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110067712, before: 1717841920, after: 1827909632 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9350128, rank: 6, write(sync,parallel): 0.313035249710083 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110243840, before: 1749557248, after: 1859801088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106090496, before: 1747685376, after: 1853775872 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114429952, before: 1734701056, after: 1849131008 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114438144, before: 1749557248, after: 1863995392 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110178304, before: 1747685376, after: 1857863680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 121597952, before: 1779855360, after: 1901453312 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114143232, before: 1742114816, after: 1856258048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110198784, before: 1772232704, after: 1882431488 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9692082, 
rank: 13, write(sync,parallel): 0.3366730213165283 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114143232, before: 1729982464, after: 1844125696 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9768114, rank: 11, write(sync,parallel): 0.3369107246398926 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109969408, before: 1732018176, after: 1841987584 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114167808, before: 1732018176, after: 1846185984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9814022, rank: 7, write(sync,parallel): 0.3412282466888428 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9858935, rank: 10, write(sync,parallel): 0.3451204299926758 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9901035, rank: 5, write(sync,parallel): 0.36812257766723633 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541389.9935718, rank: 14, write(sync,parallel): 0.35735034942626953 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 118546432, before: 1742229504, after: 1860775936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.0047078, rank: 4, write(sync,parallel): 0.3574674129486084 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114163712, before: 1779855360, after: 1894019072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114364416, before: 1742229504, after: 1856593920 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.43s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.43s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.0387597, rank: 9, write(sync,parallel): 0.3937516212463379 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.046172, rank: 12, write(sync,parallel): 0.40555453300476074 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.047409, rank: 8, write(sync,parallel): 0.4066610336303711 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.47s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212271104, before: 1757933568, after: 1970204672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212377600, before: 1742733312, after: 1955110912 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212484096, before: 1721786368, after: 1934270464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.1287403, rank: 2, write(sync,parallel): 0.48200249671936035 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212439040, before: 2019377152, after: 2231816192 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.148445, rank: 3, write(sync,parallel): 0.49837660789489746 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.149615, rank: 1, write(sync,parallel): 0.4947385787963867 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.54s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.56s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541390.1866257, rank: 0, write(sync,parallel): 0.4944157600402832 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.57s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.57s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.22802, 6, gather: 0.2659721374511719 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2280495, 5, gather: 0.20602965354919434 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2280936, 3, gather: 
0.0458681583404541 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.228023, 7, gather: 0.20926141738891602 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.227912, 12, gather: 0.15193963050842285 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2281675, 4, gather: 0.18630766868591309 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2279336, 11, gather: 0.21693778038024902 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2282236, 2, gather: 0.0682535171508789 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2279522, 10, gather: 0.2108314037322998 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.228234, 1, gather: 0.037377357482910156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.228101, 14, gather: 0.2010059356689453 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2281132, 9, gather: 0.16080379486083984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2281203, 15, gather: 0.28046584129333496 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2281365, 8, gather: 0.15048980712890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.228228, 13, gather: 0.22771573066711426 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.2307932, 0, gather: 0.005178928375244141 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541390.262091, metadata_write: 0.031170368194580078 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2228s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0734s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0392s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1041s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0823s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2422s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3023s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1861s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2469s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1966s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1878s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2368s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3165s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2460s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2637s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2530s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.004207611083984375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.004242658615112305 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.00421142578125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0042018890380859375 to finalize ckpt save 
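The sequence above ("schedule async ckpt", worker "0/1 started", "write(sync,parallel)", "joining self.process", "finalize") reads as the trace of an asynchronous checkpoint write: the slow filesystem I/O runs in forked worker processes while the training process only pays for scheduling and a later join. Below is a rough sketch of that pattern; it illustrates the idea only and is not the megatron.core FileSystemWriterAsync/TemporalAsyncCaller code, and the helper names are invented.

```python
import multiprocessing as mp
import time

import torch

def _write_worker(staged_state, path):
    # Slow, synchronous file write, but it runs in a child process,
    # off the training process's critical path.
    torch.save(staged_state, path)

def schedule_async_save(staged_state, path):
    """Hypothetical helper mirroring the 'schedule async ckpt' step."""
    proc = mp.get_context("fork").Process(target=_write_worker, args=(staged_state, path))
    t0 = time.time()
    proc.start()
    print(f"scheduled async ckpt in {time.time() - t0:.4f}s")
    return proc

def finalize_async_save(proc):
    """Later (e.g. before the next save): join the writer, as in the 'joining self.process' lines."""
    proc.join()
    if proc.exitcode != 0:
        raise RuntimeError("async checkpoint writer failed")
```

Read this way, the per-rank "write(sync,parallel)" times of roughly 0.3-0.5 s are the child processes' actual write durations, while the training ranks only report the much smaller schedule and join overheads.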
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.004209280014038086 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.004215717315673828 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.004204273223876953 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.004240751266479492 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0042226314544677734 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.004226207733154297 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.00423884391784668 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.004206180572509766 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.004209280014038086 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.004167318344116211 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.004200935363769531 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.004227638244628906 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (3068.05, 3068.52)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.103777E+01 | lm loss PPL: 6.217886E+04 |
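The shape dump above shows what context parallelism does to each evaluation batch: with CP_SIZE=4, every rank keeps only its 16384/4 = 4096-token slice of tokens, labels, loss_mask and position_ids, while the attention mask keeps the full 16384-long key dimension ([8, 1, 4096, 16384]). The sketch below reproduces just that shape bookkeeping with a naive contiguous split; Megatron's actual context-parallel sharding interleaves chunks for load balance, so treat this only as an illustration (`slice_batch_for_cp` is a made-up helper).

```python
import torch

def slice_batch_for_cp(batch, cp_rank, cp_size):
    """Keep this rank's 1/cp_size slice of the sequence dimension (naive contiguous split)."""
    sliced = {}
    for key, t in batch.items():
        if key == "attention_mask":      # [b, 1, seq, seq] -> [b, 1, seq/cp, seq]
            sliced[key] = t.chunk(cp_size, dim=2)[cp_rank]
        else:                            # [b, seq]         -> [b, seq/cp]
            sliced[key] = t.chunk(cp_size, dim=1)[cp_rank]
    return sliced

# Tiny sequence so the example is cheap to run; in the log above b=8, seq=16384, cp_size=4.
b, seq, cp = 8, 32, 4
batch = {
    "tokens": torch.zeros(b, seq, dtype=torch.long),
    "attention_mask": torch.ones(b, 1, seq, seq, dtype=torch.bool),
}
out = slice_batch_for_cp(batch, cp_rank=0, cp_size=cp)
print(out["tokens"].shape)          # torch.Size([8, 8])
print(out["attention_mask"].shape)  # torch.Size([8, 1, 8, 32])
```

As a side note, the reported perplexity is simply the exponential of the LM loss: exp(1.103777E+01) ≈ 6.2179E+04, matching the logged PPL.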
+---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask 
torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor 
after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 11 +Done exporting trace 11 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (99.29, 99.64) +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.103777E+01 | lm loss PPL: 6.217886E+04 | +---------------------------------------------------------------------------------------------------------- +Running ctx_length=4096, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 4 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: 
Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 
4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 
16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... 
False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. 
True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... 
False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... 
None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. 
None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... 
+ wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.044 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.492 seconds +time to initialize megatron (seconds): 10.728 +[after megatron is initialized] datetime: 2025-06-21 21:30:35 +building GPT model ... 
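The padded vocab size reported above follows from rounding the raw GPT2BPETokenizer vocab (50257) up to a multiple of make_vocab_size_divisible_by (128) times the tensor-model-parallel size (4), so each of the 4 tensor-parallel shards holds an equal slice of the embedding table. A minimal sketch of that arithmetic (pad_vocab_size is an illustrative helper, not the Megatron API):

    def pad_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
        # Round up so the padded vocab divides evenly across tensor-parallel ranks.
        multiple = divisible_by * tp_size              # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    padded = pad_vocab_size(50257, 128, 4)             # -> 50688
    print(padded, padded - 50257)                      # -> 50688 431, matching the padded-vocab line above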
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (156830720 elements, 156830720 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + 
module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.47, 3.69) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:30:36 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
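The split_matrix logged above is just the "1,1,1" split string normalized into cumulative (start, end) fractions for the train/valid/test splits. A small sketch of that conversion (split_to_matrix is a hypothetical helper, not the Megatron implementation):

    def split_to_matrix(split: str):
        # Normalize comma-separated weights into cumulative (start, end) bounds.
        weights = [float(w) for w in split.split(",")]
        total = sum(weights)
        cumulative = [sum(weights[: i + 1]) / total for i in range(len(weights))]
        return list(zip([0.0] + cumulative[:-1], cumulative))

    print(split_to_matrix("1,1,1"))
    # [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)], as in the split_matrix line above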
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005601 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002168 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002034 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:30:36 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (596.64, 606.53) + train/valid/test-data-iterators-setup ..........: (17.29, 132.45) +training ... +Setting rerun_state_machine.current_iteration to 0... 
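In the batch dumps that follow, each rank first materializes the full sequence (e.g. tokens of shape [8, 32768]) and then keeps only its context-parallel share, seq_len / context_parallel_size = 32768 / 4 = 8192 positions, while the attention mask keeps all 32768 key positions. A simplified sketch of that slicing, assuming a plain contiguous split (Megatron's actual context-parallel batch slicing typically assigns two non-contiguous chunks per rank to balance causal-attention work, but the resulting shapes are the same):

    import torch

    def slice_for_cp_rank(batch: dict, cp_size: int, cp_rank: int) -> dict:
        # Keep this rank's 1/cp_size share of the sequence dimension.
        # Only the query dimension of the attention mask is sharded here,
        # so a [b, 1, s, s] mask becomes [b, 1, s/cp, s], as in the dumps below.
        out = {}
        for name, t in batch.items():
            dim = 2 if name == "attention_mask" else 1
            chunk = t.shape[dim] // cp_size
            out[name] = t.narrow(dim, cp_rank * chunk, chunk)
        return out

    # Toy sizes; in this run the same slicing takes [8, 32768] -> [8, 8192] with cp_size=4.
    batch = {
        "tokens": torch.zeros(8, 32, dtype=torch.long),
        "attention_mask": torch.ones(8, 1, 32, 32, dtype=torch.bool),
    }
    sliced = slice_for_cp_rank(batch, cp_size=4, cp_rank=0)
    print(sliced["tokens"].shape)          # torch.Size([8, 8])
    print(sliced["attention_mask"].shape)  # torch.Size([8, 1, 8, 32])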
+[before the start of training step] datetime: 2025-06-21 21:30:36 +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: 
loss_mask torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) 
+batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB + [2025-06-21 21:30:49] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 13752.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 1] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27726.0 | max reserved: 27726.0 +[Rank 8] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27534.0 | max reserved: 27534.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27726.0 | max reserved: 27726.0 +[Rank 15] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27938.0 | max reserved: 27938.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27790.0 | max reserved: 27790.0 +[Rank 13] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27938.0 | max reserved: 27938.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27726.0 | max reserved: 27726.0 +[Rank 11] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27490.0 | max reserved: 27490.0 +[Rank 6] (after 1 iterations) memory (MB) | allocated: 
12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27790.0 | max reserved: 27790.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27874.0 | max reserved: 27874.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27278.0 | max reserved: 27278.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27426.0 | max reserved: 27426.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27342.0 | max reserved: 27342.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27278.0 | max reserved: 27278.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27790.0 | max reserved: 27790.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 12193.41064453125 | max allocated: 25364.52783203125 | reserved: 27534.0 | max reserved: 27534.0
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids
torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 1 +Done exporting trace 1 + [2025-06-21 21:30:50] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 663.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 
8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: labels 
torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch 
tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:30:51] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 637.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768])batch tensor after cp: + tokens batch tensor: torch.Size([8, 8192])labels + batch tensor after cp:torch.Size([8, 32768]) +labels batch tensor:torch.Size([8, 8192]) +loss_mask batch tensor after cp: torch.Size([8, 32768]) +loss_mask torch.Size([8, 8192])batch tensor: + batch tensor after cp:attention_mask attention_mask torch.Size([8, 1, 32768, 32768])torch.Size([8, 1, 8192, 32768]) + +batch tensor:batch tensor after cp: position_idsposition_ids torch.Size([8, 32768])torch.Size([8, 8192]) + +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768]) 
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:30:51] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 639.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
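These ten-line shape dumps are printed once per rank and per micro-batch, which is why identical blocks repeat and occasionally interleave mid-line when several ranks write to stdout at once. A plausible reconstruction of the helper behind them; the function name is an assumption, only the output format is taken from the log:

    def print_batch_shapes(batch, tag="batch tensor"):
        # Emits exactly the lines seen above, e.g.
        #   batch tensor: tokens torch.Size([8, 32768])
        #   batch tensor after cp: tokens torch.Size([8, 8192])
        for key in ("tokens", "labels", "loss_mask", "attention_mask", "position_ids"):
            print(f"{tag}: {key} {batch[key].shape}")

    # print_batch_shapes(batch)                              # before the CP split
    # print_batch_shapes(cp_batch, "batch tensor after cp")  # after the CP split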
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:30:52] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 625.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
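Every iteration line reports "number of skipped iterations: 1" together with a loss scale that halves each step (1073741824.0 at iteration 3, 536870912.0 at iteration 4, and so on): under fp16 dynamic loss scaling, a gradient overflow skips the optimizer step and backs the scale off by a factor of 2. A toy sketch of that rule; the real scaler also re-grows the scale after a window of overflow-free steps, and the constants here are made up:

    class ToyDynamicLossScaler:
        # Halve the scale on overflow and skip the step; double it again
        # after `growth_interval` consecutive clean steps (constants illustrative).
        def __init__(self, init_scale=2.0**30, growth_interval=1000):
            self.scale = init_scale
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow: bool) -> bool:
            """Return True when the optimizer step should be skipped."""
            if found_overflow:
                self.scale /= 2.0
                self._good_steps = 0
                return True
            self._good_steps += 1
            if self._good_steps % self.growth_interval == 0:
                self.scale *= 2.0
            return False

    scaler = ToyDynamicLossScaler(init_scale=2.0**30)
    for _ in range(3):
        print(scaler.update(found_overflow=True), scaler.scale)
    # True 536870912.0 / True 268435456.0 / True 134217728.0 -- the sequence seen in the log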
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:30:53] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 696.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:30:53] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 636.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
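"Start exporting trace N" / "Done exporting trace N" brackets a per-iteration profiler dump. The log does not show which profiler is used; a hedged sketch of how per-iteration Chrome traces could be produced with torch.profiler, where the output path, file naming, and the bare train_step placeholder are assumptions:

    import os
    from torch.profiler import profile, ProfilerActivity

    def train_step(model, batch):
        # placeholder for the real forward/backward/optimizer step
        pass

    def run_with_traces(model, batches, out_dir="traces"):
        os.makedirs(out_dir, exist_ok=True)
        for i, batch in enumerate(batches):
            with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
                train_step(model, batch)
            print(f"Start exporting trace {i}")
            prof.export_chrome_trace(os.path.join(out_dir, f"trace_{i}.json"))
            print(f"Done exporting trace {i}")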
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:30:54] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 632.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:30:55] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 619.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 32768])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:30:55] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 619.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:30:55
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.02683711051940918 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.026871204376220703 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.02701854705810547 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02705240249633789 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.02704167366027832 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.027249813079833984 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.02784132957458496 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.03412365913391113 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.03387641906738281 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03424358367919922 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.034239768981933594 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.034300804138183594 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.03435087203979492 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.03479766845703125 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.03605318069458008 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.03815722465515137 to prepare state dict for ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)]
distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel 
save sharding, time: 1.1551096439361572 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1581141948699951 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1615729331970215 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1553685665130615 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1612138748168945 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.155395269393921 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1590583324432373 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1554701328277588 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1810789108276367 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.162351369857788 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.156102180480957 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1636176109313965 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1564345359802246 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.161928415298462 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.011788368225097656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1582739353179932 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
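
The "batch tensor" / "batch tensor after cp" lines earlier in this iteration show each rank's view of the 32768-token batch shrinking to 8192 tokens once the context-parallel split is applied (CP_SIZE=4, 32768 / 4 = 8192), while the attention mask keeps its full 32768-wide key dimension. A minimal sketch of that kind of split, assuming a plain contiguous chunk along the sequence axis rather than Megatron's actual load-balanced layout (illustrative only):

```python
import torch

def split_batch_for_cp(batch, cp_size, cp_rank):
    """Keep only this context-parallel rank's slice along the sequence axis.

    Plain contiguous chunking for illustration; the attention mask keeps its
    full key length and only its query axis (dim 2) is sliced.
    """
    sliced = {}
    for name, tensor in batch.items():
        if name == "attention_mask":          # [b, 1, seq_q, seq_k]
            sliced[name] = tensor.chunk(cp_size, dim=2)[cp_rank]
        else:                                 # [b, seq]
            sliced[name] = tensor.chunk(cp_size, dim=1)[cp_rank]
    return sliced

if __name__ == "__main__":
    b, s, cp = 2, 16, 4                       # tiny stand-ins for b=8, s=32768, cp=4
    batch = {
        "tokens": torch.zeros(b, s, dtype=torch.long),
        "labels": torch.zeros(b, s, dtype=torch.long),
        "loss_mask": torch.ones(b, s),
        "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
        "position_ids": torch.arange(s).repeat(b, 1),
    }
    for name, tensor in split_batch_for_cp(batch, cp, cp_rank=0).items():
        print("batch tensor after cp:", name, tuple(tensor.shape))
```
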
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global 
metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.005267620086669922 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.005450248718261719 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.005506992340087891 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.0038444995880126953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.004243612289428711 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.005492210388183594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.0039556026458740234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.005537748336791992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.004428863525390625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9896915 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9896924 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9896932 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.989695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9894614 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9894617 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.00656437873840332 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9896955 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9897068 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9894817 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 4.935264587402344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.269050598144531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.030632019042969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9894927 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.604194641113281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.891654968261719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.082389831542969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.006140470504760742 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.414817810058594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.465217590332031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.005747079849243164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.0025348663330078125 
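
The distribute_shards_to_ranks lines above report, per saving group, roughly how many bytes each of the four ranks ended up responsible for (about 206-237 MB each), i.e. the fully parallel save spreads the checkpoint shards so every rank writes a similar amount. A small sketch of that kind of size-balanced assignment, assuming a simple greedy "give the next-largest shard to the least-loaded rank" policy, which is not necessarily the exact algorithm used here:

```python
from collections import defaultdict

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    """Greedy size balancing: hand the largest remaining shard to the rank
    with the smallest total so far; return per-rank assignments and totals."""
    totals = [0] * num_ranks
    assignment = defaultdict(list)
    for name, size in sorted(shard_sizes.items(), key=lambda kv: kv[1], reverse=True):
        rank = min(range(num_ranks), key=totals.__getitem__)
        assignment[rank].append(name)
        totals[rank] += size
    return assignment, totals

if __name__ == "__main__":
    # made-up shard sizes, just to show the balancing behaviour
    shards = {f"layer_{i}.weight": (i % 7 + 1) * 10_000_000 for i in range(24)}
    _, totals = distribute_shards_to_ranks(shards, num_ranks=4)
    print("distribution:", [(total, rank) for rank, total in enumerate(totals)])
```
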
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.004304409027099609 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.818771362304688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.249282836914062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9898517 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9895792 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.00660395622253418 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.989583 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9895887 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.678436279296875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.989614 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.437301635742188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.414817810058594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.2479248046875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.7738037109375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.006715536117553711 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541456.9940093 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1021575927734375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0417790412902832 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0319273 rank: 4, write(async) time: 0.042230844497680664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04220747947692871 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04228639602661133 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0323098 rank: 1, write(async) time: 0.04261517524719238 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.044188737869262695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04237532615661621 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0340414 rank: 13, write(async) time: 0.044580936431884766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0324082 rank: 6, write(async) time: 0.04271507263183594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04464364051818848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0423886775970459 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0345943 rank: 15, write(async) time: 0.04513382911682129 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.03249 rank: 3, write(async) time: 0.042790889739990234 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04513740539550781 
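
The "D2H and push" and "write(async)" timings above cover the step where each rank copies its GPU shards to host memory and hands them to the asynchronous writer; only after that does training continue while the file I/O proceeds in the background. A rough sketch of that device-to-host staging, assuming pinned CPU buffers and a plain dict of tensors (illustrative, not Megatron's internal code path):

```python
import torch

def stage_state_dict_to_host(state_dict):
    """Copy every CUDA tensor into a pinned CPU buffer so a background writer
    can serialize it without touching the GPU again."""
    host_copy = {}
    for key, tensor in state_dict.items():
        if tensor.is_cuda:
            buf = torch.empty(tensor.shape, dtype=tensor.dtype,
                              device="cpu", pin_memory=True)
            buf.copy_(tensor, non_blocking=True)      # asynchronous D2H copy
            host_copy[key] = buf
        else:
            host_copy[key] = tensor.clone()
    if torch.cuda.is_available():
        torch.cuda.synchronize()                       # make sure all copies landed
    return host_copy

if __name__ == "__main__":
    sd = {"weight": torch.randn(4, 4)}
    if torch.cuda.is_available():
        sd = {k: v.cuda() for k, v in sd.items()}
    print({k: v.device for k, v in stage_state_dict_to_host(sd).items()})
```
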
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.032531 rank: 7, write(async) time: 0.04282212257385254 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.035027 rank: 14, write(async) time: 0.04554343223571777 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04329824447631836 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04543948173522949 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0334117 rank: 2, write(async) time: 0.04371786117553711 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0353403 rank: 11, write(async) time: 0.04584622383117676 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04571032524108887 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0356934 rank: 9, write(async) time: 0.04611039161682129 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.047553062438964844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0375848 rank: 8, write(async) time: 0.04796743392944336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.050939321517944336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0412388 rank: 5, write(async) time: 0.05137372016906738 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05617094039916992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0461855 rank: 10, write(async) time: 0.0565946102142334 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05598092079162598 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.050382 rank: 0, write(async) time: 0.05637025833129883 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.060456275939941406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.0504487 rank: 12, write(async) time: 0.060866594314575195 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.023529052734375 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.024078845977783203 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.024264812469482422 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03201723098754883 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.621246337890625e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.0503997802734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.02283644676208496 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.02371978759765625 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.024071455001831055 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.023914575576782227 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.02532362937927246 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.026964902877807617 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03350090980529785 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.0260312557220459 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03293037414550781 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.025191783905029297 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.02633500099182129 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
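
The repeated "0 started" / "1 started" and "FileSystemWriterAsync: collecting worker results..." lines come from the async writer fanning each rank's shards out to two workers (thread_count: 2) that serialize their buckets in parallel and then report back. A minimal sketch of that fan-out/collect pattern, assuming torch.save per bucket and a thread pool; the file names and bucketing below are made up for illustration:

```python
import os
import time
import torch
from concurrent.futures import ThreadPoolExecutor

def write_bucket(worker_id, bucket, directory):
    """One writer: serialize its bucket of tensors, report bytes and seconds."""
    print(f"{worker_id} started")
    start = time.time()
    path = os.path.join(directory, f"bucket_{worker_id}.pt")   # illustrative name
    torch.save(bucket, path)
    return worker_id, os.path.getsize(path), time.time() - start

def write_buckets_parallel(buckets, directory, num_workers=2):
    os.makedirs(directory, exist_ok=True)
    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        futures = [pool.submit(write_bucket, i, bucket, directory)
                   for i, bucket in enumerate(buckets)]
        print("collecting worker results...")
        return [future.result() for future in futures]

if __name__ == "__main__":
    buckets = [{"a": torch.randn(256, 256)}, {"b": torch.randn(256, 256)}]
    for worker_id, nbytes, secs in write_buckets_parallel(buckets, "ckpt_demo"):
        print(f"worker {worker_id}: {nbytes} bytes in {secs:.3f}s")
```
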
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.037206411361694336 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 147456, before: 1848365056, after: 1848512512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1852289024, after: 1852424192 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 196608, before: 1912328192, after: 1912524800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
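
The "schedule async ckpt", "joining self.process" and "TemporalAsyncCaller: Async process join finished after ..." lines describe the non-blocking pattern: the write is handed to a forked helper process, the training loop keeps going, and the process is joined shortly afterwards. A bare-bones sketch of that pattern with multiprocessing, assuming the state dict has already been staged to CPU (class and function names here are illustrative):

```python
import time
import multiprocessing as mp
import torch

def _write_worker(host_state_dict, path):
    torch.save(host_state_dict, path)

class AsyncCheckpointCall:
    """Hand the write to a child process and join it once training can pause."""

    def __init__(self, host_state_dict, path):
        self.forked_at = time.time()
        self.process = mp.Process(target=_write_worker, args=(host_state_dict, path))
        self.process.start()                 # "schedule async ckpt"

    def join(self):
        self.process.join()                  # "joining self.process"
        print(f"async process join finished after "
              f"{time.time() - self.forked_at:.2f}s from forking")

if __name__ == "__main__":
    call = AsyncCheckpointCall({"w": torch.randn(64, 64)}, "ckpt_demo.pt")
    # ...training iterations would continue here while the child writes...
    call.join()
```
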
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 34140160, before: 2079326208, after: 2113466368 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108900352, before: 1905262592, after: 2014162944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109244416, before: 1832349696, after: 1941594112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105844736, before: 1878827008, after: 1984671744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106086400, before: 1840795648, after: 1946882048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108929024, before: 1874776064, after: 1983705088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114184192, before: 1874776064, after: 1988960256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 122081280, before: 1911398400, after: 2033479680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114388992, before: 1832349696, after: 1946738688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113520640, before: 1905262592, after: 2018783232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110039040, before: 1840795648, after: 1950834688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114348032, before: 1886461952, after: 2000809984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 122658816, before: 1911410688, after: 2034069504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110133248, before: 1914400768, after: 2024534016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106016768, before: 1812701184, after: 1918717952 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.449685, rank: 6, write(sync,parallel): 0.3435962200164795 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.4554167, rank: 15, write(sync,parallel): 0.30345582962036133 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.457444, rank: 5, write(sync,parallel): 0.3402884006500244 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.4592755, rank: 7, write(sync,parallel): 0.35318922996520996 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.4620814, rank: 4, write(sync,parallel): 0.3477797508239746 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110006272, before: 1878827008, after: 1988833280 
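
The "N consumed: ..., before: ..., after: ..." lines appear to be per-worker memory accounting: a reading taken before and after the write, with "consumed" the difference (values on the order of 100-200 MB per worker). A tiny sketch of that kind of bookkeeping with psutil, assuming process RSS is the quantity tracked, which is an assumption on my part:

```python
import psutil

def report_consumed(worker_id, work):
    """Run `work()` and log how much the process RSS grew while it ran."""
    rss = lambda: psutil.Process().memory_info().rss   # resident set size in bytes
    before = rss()
    work()
    after = rss()
    print(f"{worker_id} consumed: {after - before}, before: {before}, after: {after}")

if __name__ == "__main__":
    held = []   # keep the buffer alive so the growth stays visible
    report_consumed(0, lambda: held.append(bytearray(50_000_000)))
```
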
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110153728, before: 1886461952, after: 1996615680 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114163712, before: 1951977472, after: 2066141184 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 129769472, before: 1892233216, after: 2022002688 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 130887680, before: 1951977472, after: 2082865152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114302976, before: 1906905088, after: 2021208064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110108672, before: 1812701184, after: 1922809856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.5084708, rank: 13, write(sync,parallel): 0.35878515243530273 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114339840, before: 1914400768, after: 2028740608 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.516906, rank: 11, write(sync,parallel): 0.3646507263183594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109711360, before: 1906905088, after: 2016616448 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113647616, before: 1892233216, after: 2005880832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.5357437, rank: 8, write(sync,parallel): 0.3751981258392334 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.538395, rank: 14, write(sync,parallel): 0.38408708572387695 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.5507326, rank: 9, write(sync,parallel): 0.3981783390045166 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212344832, 
before: 1852289024, after: 2064633856 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.44s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.44s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.5703533, rank: 10, write(sync,parallel): 0.40672826766967773 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.5707943, rank: 12, write(sync,parallel): 0.39990925788879395 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212168704, before: 1912328192, after: 2124496896 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.6004999, rank: 3, write(sync,parallel): 0.42811131477355957 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.48s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.48s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.6139867, rank: 2, write(sync,parallel): 0.44109320640563965 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212361216, before: 1848365056, after: 2060726272 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.49s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.50s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.653087, rank: 1, write(sync,parallel): 0.4835233688354492 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212246528, before: 2079326208, after: 2291572736 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.54s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541457.713307, rank: 0, write(sync,parallel): 0.49323010444641113 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.57s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7516675, 6, gather: 0.2674117088317871 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7516556, 7, gather: 0.2578246593475342 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7519014, 9, gather: 0.17037630081176758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7517767, 4, gather: 0.25219225883483887 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7520409, 11, gather: 0.20432639122009277 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7518184, 3, gather: 0.12061929702758789 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7520616, 13, gather: 0.21253728866577148 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7519016, 1, gather: 0.07185244560241699 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7518904, 2, gather: 0.10956430435180664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7521482, 14, gather: 0.18117308616638184 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7521496, 10, gather: 0.14259123802185059 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7520502, 5, gather: 0.26103711128234863 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7521572, 12, gather: 0.14223194122314453 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.75254, 8, gather: 0.18291854858398438 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.752485, 15, gather: 0.26578545570373535 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541457.7543476, 0, gather: 0.005189418792724609 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0071s
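
The per-rank "gather" timings and the closing "finalize took 0.0071s" line correspond to the last phase of the distributed save: every rank reports its local write results to the coordinating rank, which merges them into the global checkpoint metadata. A schematic of that step with torch.distributed.gather_object, assuming an already-initialized process group and a made-up result structure (not the exact Megatron call sequence):

```python
import torch.distributed as dist

def gather_write_results(local_results, coordinator_rank=0):
    """Send every rank's local write results to the coordinator, which merges
    them into one record for the global checkpoint metadata."""
    world_size = dist.get_world_size()
    bucket = [None] * world_size if dist.get_rank() == coordinator_rank else None
    dist.gather_object(local_results, bucket, dst=coordinator_rank)
    if dist.get_rank() == coordinator_rank:
        return {rank: results for rank, results in enumerate(bucket)}
    return None
```
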