diff --git "a/attnserver.run_attnserver.slurm.sh.343207.out.log" "b/attnserver.run_attnserver.slurm.sh.343207.out.log" --- "a/attnserver.run_attnserver.slurm.sh.343207.out.log" +++ "b/attnserver.run_attnserver.slurm.sh.343207.out.log" @@ -8,3 +8,13044 @@ CHECKPOINT_PATH: gpt-checkpoint PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron -------------------------------- /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... 
None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 
10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 1024 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 
224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. 
cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 1024 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... 
allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... 
True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 1024 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... 
False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. 
False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +INFO:megatron.training.initialize:Setting logging level to 0 +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.043 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 5.838 seconds +time to initialize megatron (seconds): 11.590 +[after megatron is initialized] datetime: 2025-06-21 21:20:05 +building GPT model ... 
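The padded-vocab line above (50257 -> 51200, 943 dummy tokens) comes from rounding the tokenizer vocab up to a multiple of make_vocab_size_divisible_by times the tensor-model-parallel size. A minimal sketch of that arithmetic, using only values from the argument dump:

import math

orig_vocab_size = 50257      # GPT2BPETokenizer vocab reported above
divisible_by = 128           # make_vocab_size_divisible_by
tp_size = 8                  # tensor_model_parallel_size

multiple = divisible_by * tp_size                          # 1024
padded = math.ceil(orig_vocab_size / multiple) * multiple  # 51200
print(padded, padded - orig_vocab_size)                    # 51200 943

Other derived values in the dump are consistent with the usual defaults when left unset: ffn_hidden_size = 4 * hidden_size = 16384 and kv_channels = hidden_size // num_attention_heads = 64.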
+>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 74511872 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 74511872 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 74511872 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 74511872 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 74511872 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 74511872 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 74511872 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 74511872 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (74511872 elements, 74511872 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config 
OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.21, 3.23) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:20:05 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
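The target sizes above (train: 10, validation: 1, test: 1) follow from the run arguments. A small sketch of the sizing, assuming the standard Megatron rule train_samples = train_iters * global_batch_size with the validation/test counts derived from eval_interval and eval_iters:

train_iters, global_batch_size = 10, 1
eval_interval, eval_iters = 16, 1

train_samples = train_iters * global_batch_size                                       # 10
valid_samples = (train_iters // eval_interval + 1) * eval_iters * global_batch_size   # 1
test_samples = eval_iters * global_batch_size                                         # 1
print(train_samples, valid_samples, test_samples)                                     # 10 1 1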
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=1024, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.008020 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66592 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.003559 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66562 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.003414 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66686 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:20:05 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (206.87, 208.14) + train/valid/test-data-iterators-setup ..........: (21.85, 116.63) +training ... +Setting rerun_state_machine.current_iteration to 0... 
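The per-rank count of 74,511,872 parameters reported during the model build is consistent with the arguments above. A minimal sketch of the accounting, assuming the sharding implied by the config: column/row-parallel weights split across the 8 tensor-parallel ranks, row-parallel biases and LayerNorms replicated, and the output layer tied to the word embeddings (untie_embeddings_and_output_weights is False):

hidden, ffn, layers, tp = 4096, 16384, 2, 8
heads, kv_groups, kv_ch = 64, 16, 64
padded_vocab, max_pos = 51200, 1024

qkv_out = heads * kv_ch + 2 * kv_groups * kv_ch           # 4096 + 2048 = 6144 (GQA)
per_layer = (
    hidden * qkv_out // tp + qkv_out // tp                 # QKV weight + bias (column-parallel)
    + hidden * hidden // tp + hidden                       # attention proj weight (row-parallel) + bias
    + hidden * ffn // tp + ffn // tp                       # MLP fc1 weight + bias (column-parallel)
    + ffn * hidden // tp + hidden                          # MLP fc2 weight (row-parallel) + bias
    + 4 * hidden                                           # two LayerNorms (weight + bias each)
)
embedding = padded_vocab * hidden // tp + max_pos * hidden # sharded word emb + position emb
total = layers * per_layer + 2 * hidden + embedding        # + final LayerNorm
print(total)                                               # 74511872

The same numbers line up with the summary printed after the first iteration below: 8 * 44,094,976 is roughly 0.35 B transformer parameters and 51200 * 4096 is roughly 0.21 B embedding parameters.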
+[before the start of training step] datetime: 2025-06-21 21:20:05 +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: batch tensor after cp:position_ids tokenstorch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after 
cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 0 +Done exporting trace 0 + [2025-06-21 21:20:11] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 5772.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.0703 +Theoretical memory footprints: weight and optimizer=1206.09 MB +[Rank 3] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1116.0 | max reserved: 1116.0[Rank 2] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1076.0 | max reserved: 1076.0 + +[Rank 7] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1076.0 | max reserved: 1076.0[Rank 1] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1096.0 | max reserved: 1096.0 + +[Rank 4] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1096.0 | max reserved: 1096.0 +[Rank 6] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1116.0 | max reserved: 1116.0 +[Rank 0] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1076.0 | max reserved: 1076.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 991.75341796875 | max allocated: 991.75439453125 | reserved: 1116.0 | max reserved: 1116.0 +batch tensor:batch tensor: tokenstokens torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor:batch tensor: labelslabels torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor:batch tensor: loss_maskloss_mask torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor:batch tensor: attention_maskattention_mask torch.Size([1, 1, 1024, 1024])torch.Size([1, 1, 1024, 1024]) + +batch tensor: batch tensor:position_ids position_ids torch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor: tokens batch tensor after cp: tokenstorch.Size([1, 1024])batch tensor:batch tensor after cp: +batch tensor: batch tensor:tokens torch.Size([1, 1024])tokens + labelstorch.Size([1, 1024]) batch tensor after cp:torch.Size([1, 1024])tokens + torch.Size([1, 1024]) +labelsbatch tensor after cp: + batch tensor: torch.Size([1, 1024])labels + batch tensor:loss_masktorch.Size([1, 1024])batch tensor after cp:torch.Size([1, 1024]) loss_mask +torch.Size([1, 1024]) + batch 
tensor:torch.Size([1, 1024])labels +labels batch tensor after cp: + batch tensor:torch.Size([1, 1024]) batch tensor after cp:loss_masktorch.Size([1, 1024]) + attention_maskattention_mask +batch tensor: torch.Size([1, 1024]) batch tensor: +torch.Size([1, 1, 1024, 1024])torch.Size([1, 1, 1024, 1024]) batch tensor after cp: + loss_maskattention_mask +loss_maskbatch tensor after cp: batch tensor: torch.Size([1, 1, 1024, 1024])torch.Size([1, 1024])torch.Size([1, 1024]) position_idsposition_ids + + + batch tensor:batch tensor after cp:torch.Size([1, 1024]) +torch.Size([1, 1024])batch tensor: + attention_mask position_idsattention_masktorch.Size([1, 1, 1024, 1024]) +torch.Size([1, 1, 1024, 1024])torch.Size([1, 1024])batch tensor: + + batch tensor after cp:batch tensor:position_ids batch tensor:tokens position_idstorch.Size([1, 1024])torch.Size([1, 1024]) +batch tensor after cp:tokens +labelstorch.Size([1, 1024]) + torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor:batch tensor after cp: labelsattention_mask torch.Size([1, 1024])torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: +batch tensor: batch tensor after cp: tokens loss_maskposition_ids batch tensor after cp:torch.Size([1, 1024]) torch.Size([1, 1024]) + torch.Size([1, 1024])batch tensor after cp: +tokens + labelsbatch tensor:torch.Size([1, 1024]) +attention_masktorch.Size([1, 1024])batch tensor after cp: batch tensor: + torch.Size([1, 1, 1024, 1024]) batch tensor after cp:labels +tokens batch tensor: torch.Size([1, 1024])loss_mask + position_idsbatch tensor after cp: torch.Size([1, 1024])torch.Size([1, 1024]) + +torch.Size([1, 1024])loss_maskbatch tensor after cp:batch tensor: + torch.Size([1, 1024]) attention_mask +labels batch tensor after cp:batch tensor:torch.Size([1, 1, 1024, 1024]) torch.Size([1, 1024])attention_mask + + batch tensor after cp:batch tensor:torch.Size([1, 1, 1024, 1024]) tokens +position_idsloss_maskbatch tensor after cp: batch tensor after cp: torch.Size([1, 1024])torch.Size([1, 1024])position_ids torch.Size([1, 1024]) + + tokens +torch.Size([1, 1024])batch tensor: + torch.Size([1, 1024])batch tensor:attention_mask + batch tensor after cp:labelstorch.Size([1, 1, 1024, 1024]) labelstorch.Size([1, 1024]) + +torch.Size([1, 1024])batch tensor:batch tensor: + batch tensor after cp: position_ids loss_maskloss_masktorch.Size([1, 1024]) +torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor:batch tensor after cp: attention_maskattention_mask torch.Size([1, 1, 1024, 1024])torch.Size([1, 1, 1024, 1024]) + +batch tensor:batch tensor after cp: position_idsposition_idsbatch tensor after cp: torch.Size([1, 1024])torch.Size([1, 1024])tokens + + torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp:batch tensor after cp: position_idstokens torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 1 +Done exporting trace 1 + [2025-06-21 21:20:11] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 66.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan 
iterations: 0 | +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])batch tensor: +batch tensor: position_ids tokens torch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask batch tensor after cp:torch.Size([1, 1024]) +tokens batch tensor:torch.Size([1, 1024]) +attention_maskbatch tensor after cp: labelstorch.Size([1, 1, 1024, 1024]) +torch.Size([1, 1024]) +batch tensor: batch tensor after cp:position_ids loss_masktorch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: batch tensor:tokens torch.Size([1, 1024]) +tokensbatch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp:torch.Size([1, 1024]) loss_mask + torch.Size([1, 1024]) +batch tensor: batch tensor after cp:labels attention_mask torch.Size([1, 1024])torch.Size([1, 1, 1024, 1024]) + +batch tensor:batch tensor after cp: loss_maskposition_ids torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor:batch tensor: position_ids torch.Size([1, 1024]) +tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_maskbatch tensor after cp: tokenstorch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor:batch tensor after cp: attention_masklabels torch.Size([1, 1024])torch.Size([1, 1, 1024, 1024]) + +batch tensor after cp:batch tensor: loss_maskposition_ids torch.Size([1, 1024])torch.Size([1, 1024]) + +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor:batch tensor after cp: position_ids torch.Size([1, 1024]) +tokens batch tensor after cp: tokens torch.Size([1, 1024]) +torch.Size([1, 1024])batch tensor after cp: +labels batch tensor:torch.Size([1, 1024]) +labelsbatch tensor after cp: torch.Size([1, 1024])loss_mask + batch tensor:torch.Size([1, 1024]) +loss_maskbatch tensor after cp: torch.Size([1, 1024])attention_mask + torch.Size([1, 1, 1024, 1024])batch tensor: + attention_maskbatch tensor after cp: position_idstorch.Size([1, 1, 1024, 1024]) +torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor after cp: batch tensor:tokens 
labelstorch.Size([1, 1024]) +batch tensor after cp:torch.Size([1, 1024]) +labelsbatch tensor: torch.Size([1, 1024])loss_mask + batch tensor after cp:torch.Size([1, 1024]) +loss_mask batch tensor:torch.Size([1, 1024]) +attention_maskbatch tensor after cp: torch.Size([1, 1, 1024, 1024])attention_mask + torch.Size([1, 1, 1024, 1024])batch tensor: + batch tensor after cp:position_ids position_idstorch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:20:11] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 29.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp:batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +tokensbatch tensor after cp: position_ids torch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024])batch tensor: +batch tensor: attention_masktokens torch.Size([1, 1, 1024, 1024]) +batch tensor:batch tensor: torch.Size([1, 1024])position_ids + tokensbatch tensor:torch.Size([1, 1024]) +labels torch.Size([1, 1024]) +batch tensor: torch.Size([1, 1024])loss_mask + torch.Size([1, 1024])batch tensor: + labelsbatch tensor: batch tensor after cp:torch.Size([1, 1024]) batch tensor:attention_mask + tokensbatch tensor: torch.Size([1, 1, 1024, 1024]) torch.Size([1, 1024]) +batch tensor:batch tensor after cp: +loss_masktokens batch tensor: labels tokenstorch.Size([1, 1024]) +torch.Size([1, 1024]) + batch tensor:position_idsbatch tensor:torch.Size([1, 1024]) torch.Size([1, 1024]) +batch tensor: +attention_masklabelsbatch tensor after cp:torch.Size([1, 1024]) +torch.Size([1, 1, 1024, 1024])torch.Size([1, 1024])labelsloss_mask +batch tensor: +batch tensor:torch.Size([1, 1024]) batch tensor: torch.Size([1, 1024])batch tensor after cp: +position_ids batch tensor:loss_mask torch.Size([1, 1024]) +tokens + batch tensor:tokensbatch tensor after cp:torch.Size([1, 1024])loss_mask torch.Size([1, 1024]) + + attention_masktorch.Size([1, 1024])torch.Size([1, 1024]) +attention_maskbatch tensor:batch tensor: + torch.Size([1, 1, 1024, 1024])batch tensor after cp:batch tensor after cp:torch.Size([1, 1, 1024, 1024])attention_mask + labels + tokensbatch tensor after cp:torch.Size([1, 1, 1024, 1024]) batch tensor: labels +torch.Size([1, 1024]) torch.Size([1, 1024]) +torch.Size([1, 1024])position_idsbatch tensor: +position_idsbatch tensor after cp: batch tensor: + torch.Size([1, 1024]) batch tensor after cp:position_ids labelstorch.Size([1, 1024]) + +loss_mask loss_mask torch.Size([1, 1024])torch.Size([1, 1024])torch.Size([1, 1024])torch.Size([1, 1024]) + + +batch tensor after cp: +batch tensor after cp: batch tensor after cp:loss_mask batch tensor: 
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:20:11] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 28.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:20:11] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 30.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
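The attention_mask logged for each sample is a [1, 1, 1024, 1024] tensor because create_attention_mask_in_dataloader is True, so every micro-batch carries a full causal mask alongside the [1, 1024] token, label, loss-mask, and position-id tensors. A minimal sketch of deriving those auxiliary tensors from a token batch is below; it reproduces the logged shapes but is a generic reconstruction, not the exact Megatron get_ltor_masks_and_position_ids code.

import torch

def build_masks_and_position_ids(tokens):
    # tokens: [batch, seq_len]
    b, s = tokens.shape
    # Lower-triangular causal pattern, broadcast over a singleton head dimension
    # to shape [b, 1, s, s]. Here True marks positions that are masked out.
    causal = torch.tril(torch.ones(s, s, dtype=torch.bool))
    attention_mask = causal.unsqueeze(0).unsqueeze(0).expand(b, 1, s, s) == 0
    loss_mask = torch.ones(b, s, dtype=torch.float)            # [b, s]
    position_ids = torch.arange(s).unsqueeze(0).expand(b, s)   # [b, s]
    return attention_mask, loss_mask, position_ids

tokens = torch.zeros(1, 1024, dtype=torch.long)
mask, loss_mask, pos = build_masks_and_position_ids(tokens)
print(mask.shape, loss_mask.shape, pos.shape)  # [1, 1, 1024, 1024], [1, 1024], [1, 1024]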
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:20:11] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 28.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:20:11] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 27.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
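Every iteration above reports "number of skipped iterations: 1" and a loss scale that halves (1073741824 → 536870912 → … → 67108864): with fp16 dynamic loss scaling, a gradient overflow causes the optimizer step to be skipped and the scale to be backed off. The sketch below shows the general mechanism under simple assumptions (backoff factor 0.5, growth factor 2.0, a growth interval); it is not Megatron's exact DynamicGradScaler, which additionally applies a hysteresis before backing off.

class SimpleDynamicLossScaler:
    """Minimal sketch of fp16 dynamic loss scaling (not the Megatron implementation)."""

    def __init__(self, initial_scale=2.0**32, min_scale=1.0,
                 backoff=0.5, growth=2.0, growth_interval=1000):
        self.scale = initial_scale
        self.min_scale = min_scale
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_inf: bool) -> bool:
        """Return True if the optimizer step should run, False if it is skipped."""
        if found_inf:
            # Overflow: skip the step and back the scale off; this is what the
            # log's "number of skipped iterations: 1" and halving scale reflect.
            self.scale = max(self.scale * self.backoff, self.min_scale)
            self._good_steps = 0
            return False
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return True

scaler = SimpleDynamicLossScaler()
for _ in range(3):          # three consecutive overflows, as in the early iterations
    scaler.update(found_inf=True)
print(scaler.scale)         # 2**32 * 0.5**3 == 536870912.0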
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:20:11] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 28.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:20:11] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 25.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
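The per-iteration summary lines share a fixed pipe-separated "key: value" layout, so throughput and loss-scale behaviour can be extracted from this file directly. A small parsing sketch follows; the field names are taken from the lines above, and the file name is a placeholder, not the actual path of this log.

import re

ITER_RE = re.compile(
    r"iteration\s+(\d+)/\s*(\d+).*?"
    r"elapsed time per iteration \(ms\): ([\d.]+).*?"
    r"loss scale: ([\d.]+).*?"
    r"number of skipped iterations: (\d+)"
)

def parse_iteration_lines(path):
    rows = []
    with open(path) as f:
        for line in f:
            m = ITER_RE.search(line)
            if m:
                it, total, ms, scale, skipped = m.groups()
                rows.append({
                    "iteration": int(it),
                    "total_iterations": int(total),
                    "elapsed_ms": float(ms),
                    "loss_scale": float(scale),
                    "skipped": int(skipped),
                })
    return rows

# Placeholder file name; point this at the actual SLURM output file.
for row in parse_iteration_lines("pretrain_run.out.log"):
    print(row)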
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor after cp: position_ids torch.Size([1, 1024])
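The "Start exporting trace N" / "Done exporting trace N" pairs that bracket each iteration look like prints around a per-iteration profiler trace dump. A minimal sketch of that pattern with torch.profiler is below; the output path, the wrapped step function, and the surrounding prints are assumptions, since the script that produces these messages is not part of this log.

import torch
from torch.profiler import profile, ProfilerActivity

def run_profiled_iteration(step_fn, iteration):
    # Profile one training step and export a Chrome trace, announcing the
    # export the same way the log does. Add ProfilerActivity.CUDA on GPU nodes.
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        step_fn()
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"trace_{iteration}.json")  # assumed output path
    print(f"Done exporting trace {iteration}")

# Example with a dummy step function:
run_profiled_iteration(lambda: torch.matmul(torch.randn(64, 64), torch.randn(64, 64)), 2)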
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:20:11] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 27.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:20:11 
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.05024003982543945 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.050267934799194336 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0502469539642334 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.050340890884399414 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.05033612251281738 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.050345659255981445 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.05038189888000488 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.05056023597717285 to prepare state dict for ckpt
+WARNING:megatron.core.dist_checkpointing.serialization:Overwriting old incomplete / corrupted checkpoint...
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+Running ctx_length=2048, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 2048
+TP_SIZE: 8
+CP_SIZE: 1
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+  account_for_embedding_in_pipeline_split ......... 
False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... 
False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... 
None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 
120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... 
random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ 
None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... 
None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... 
None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 
0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.052 seconds +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.528 seconds +time to initialize megatron (seconds): 7.571 +[after megatron is initialized] datetime: 2025-06-21 21:21:21 +building GPT model ... 
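The padded vocab reported above (50257 -> 51200, 943 dummy tokens) is the GPT-2 vocab rounded up so that every tensor-parallel rank gets an equal, 128-divisible shard. A minimal sketch of that arithmetic, assuming the make_vocab_size_divisible_by=128 and tensor_model_parallel_size=8 values from the argument dump (not output from the run):
# Not from the log: vocab padding arithmetic under the assumptions above.
def pad_vocab_size(orig_size, divisible_by=128, tp_size=8):
    multiple = divisible_by * tp_size          # 1024: shard granularity across 8 TP ranks
    return ((orig_size + multiple - 1) // multiple) * multiple
padded = pad_vocab_size(50257)
print(padded, padded - 50257)  # 51200 943 -> "padded vocab (size: 50257) with 943 dummy tokens"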
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (78706176 elements, 78706176 padded size):
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.embedding.position_embeddings.weight
+INFO:megatron.core.optimizer:Setting up optimizer with config
OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=4096, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... 
None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. 
None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 
1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . 
False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... 
None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ 
None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 
0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 
10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... 
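The "setting number of microbatches to constant 1" line above follows directly from the batch-size arguments of this run (global_batch_size=1, micro_batch_size=1, data-parallel size 1). A trivial sketch, assuming the usual global = micro x data-parallel x num-microbatches relation (the module name is from the log; the formula here is the standard relation, not copied from its source):
# Not from the log: microbatch count under the batch-size arguments shown above.
global_batch_size = 1
micro_batch_size = 1
data_parallel_size = 1
assert global_batch_size % (micro_batch_size * data_parallel_size) == 0
num_microbatches = global_batch_size // (micro_batch_size * data_parallel_size)
print(num_microbatches)  # 1 -> "setting number of microbatches to constant 1"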
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.040 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.805 seconds +time to initialize megatron (seconds): 7.539 +[after megatron is initialized] datetime: 2025-06-21 21:22:03 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 87094784 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (87094784 elements, 87094784 padded size): + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + 
module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.22, 3.24) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:22:04 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
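The 87,094,784 parameters reported per (tensor, pipeline) rank above can be reproduced with a back-of-the-envelope count from the arguments (hidden 4096, ffn 16384, 2 layers, 64 heads with 16 query groups, kv_channels 64, padded vocab 51200, seq 4096, TP 8). The sketch below assumes the usual layout (tied embeddings, replicated LayerNorms and row-parallel biases) rather than quoting the run itself; it also shows that the gap to the earlier 78,706,176-parameter run is exactly the extra position-embedding rows for the longer context.
# Not from the log: rough per-TP-rank parameter accounting under the assumptions above.
tp, hidden, ffn, layers = 8, 4096, 16384, 2
heads, groups, kv_ch = 64, 16, 64
vocab, seq = 51200, 4096
word_emb = vocab // tp * hidden                         # 26,214,400 column-parallel word embeddings
pos_emb = seq * hidden                                  # 16,777,216 replicated position embeddings
qkv_out = (heads * kv_ch + 2 * groups * kv_ch) // tp    # 768 fused-QKV output rows per rank
per_layer = (
    2 * hidden                                          # pre-attention LayerNorm weight + bias
    + hidden * qkv_out + qkv_out                        # fused QKV weight + bias
    + (hidden // tp) * hidden + hidden                  # attention output projection + bias
    + 2 * hidden                                        # pre-MLP LayerNorm weight + bias
    + hidden * (ffn // tp) + ffn // tp                  # MLP fc1 weight + bias
    + (ffn // tp) * hidden + hidden                     # MLP fc2 weight + bias
)
total = word_emb + pos_emb + layers * per_layer + 2 * hidden  # plus final LayerNorm
print(total)                                            # 87094784, matching the log
print(total - (seq - 2048) * hidden)                    # 78706176, the earlier 2048-context run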
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005362 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002220 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002108 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:22:04 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (410.93, 422.91) + train/valid/test-data-iterators-setup ..........: (32.71, 122.73) +training ... +Setting rerun_state_machine.current_iteration to 0... 
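Because create_attention_mask_in_dataloader is True in this run, every microbatch printed below carries a full [1, 1, 4096, 4096] attention mask alongside the [1, 4096] token tensors. A quick size check, assuming a boolean (1 byte per element) mask; the dtype is an assumption, not stated in the log:
# Not from the log: per-sample attention-mask size for the shape printed below.
seq = 4096
mask_elements = 1 * 1 * seq * seq        # [1, 1, 4096, 4096]
print(mask_elements, mask_elements / 2**20)  # 16777216 elements, ~16 MiB at 1 byte each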
+[before the start of training step] datetime: 2025-06-21 21:22:04 +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after 
cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+ [2025-06-21 21:22:12] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 8782.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.0703
+Theoretical memory footprints: weight and optimizer=1206.09 MB
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1664.0 | max reserved: 1664.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1696.0 | max reserved: 1696.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1732.0 | max reserved: 1732.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1664.0 | max reserved: 1664.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1750.0 | max reserved: 1750.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1766.0 | max reserved: 1766.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1750.0 | max reserved: 1750.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 1150.83544921875 | max allocated: 1393.0400390625 | reserved: 1696.0 | max reserved: 1696.0
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp:
attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 1 +Done exporting trace 1 + [2025-06-21 21:22:13] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 113.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 
2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens
torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:22:13] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 84.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask batch tensor:torch.Size([1, 1, 4096, 4096])batch tensor: + batch tensor: tokensposition_ids tokenstorch.Size([1, 4096]) +torch.Size([1, 4096]) +torch.Size([1, 4096])batch tensor: + labels batch tensor:torch.Size([1, 4096]) + batch tensor:labels loss_mask torch.Size([1, 4096])torch.Size([1, 4096]) + +batch tensor:batch tensor: attention_maskloss_maskbatch tensor after cp: tokenstorch.Size([1, 1, 4096, 4096]) torch.Size([1, 4096]) +torch.Size([1, 4096]) +batch tensor: + batch tensor:position_idsbatch tensor after cp: labelsattention_mask torch.Size([1, 4096])torch.Size([1, 4096]) + +torch.Size([1, 1, 4096, 4096])batch tensor after cp: +loss_mask batch tensor:torch.Size([1, 4096]) +position_idsbatch tensor after cp: torch.Size([1, 4096])attention_mask +torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: batch tensor after cp:tokens attention_mask torch.Size([1, 4096])torch.Size([1, 1, 4096, 4096]) + +batch tensor after cp:batch tensor after cp: labelsposition_ids torch.Size([1, 4096])torch.Size([1, 4096]) + +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels 
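The iteration lines above report the elapsed time per iteration and the global batch size, which is enough for a rough throughput estimate. The sketch below is back-of-envelope arithmetic only, assuming the 4096-token sequence length and batch size 1 visible in the batch-shape prints; it is not Megatron's own --log-throughput accounting.

```python
# Rough throughput from the iteration log lines (illustrative arithmetic only).
seq_length = 4096        # sequence length for this run, per the batch-shape prints
global_batch_size = 1    # "global batch size: 1" in the iteration line
elapsed_ms = 84.3        # "elapsed time per iteration (ms)" at iteration 3

tokens_per_iter = seq_length * global_batch_size
tokens_per_sec = tokens_per_iter / (elapsed_ms / 1000.0)
print(f"~{tokens_per_sec:,.0f} tokens/s")   # roughly 48,600 tokens/s for this 2-layer toy model
```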
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:22:13] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 51.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:22:13] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 56.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
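Each rank logs the batch shapes twice: once as produced by the dataloader ("batch tensor") and once after context-parallel slicing ("batch tensor after cp"). With context-parallel size 1, as in this run, the two sets of shapes are identical. The sketch below only illustrates the kind of sequence-dimension split that would happen for cp_size > 1; it is not Megatron's actual helper, which also load-balances the causal-attention work across ranks.

```python
import torch

def slice_batch_for_cp(batch: dict, cp_size: int, cp_rank: int) -> dict:
    """Illustrative context-parallel slicing: keep 1/cp_size of the sequence
    dimension on each CP rank. With cp_size == 1 (this log), it is a no-op."""
    if cp_size == 1:
        return batch
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":            # [b, 1, s, s]: slice the query dimension
            chunk = t.shape[2] // cp_size
            out[name] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:                                    # [b, s] tensors
            chunk = t.shape[1] // cp_size
            out[name] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

# Shapes matching the log: [1, 4096] token tensors and a [1, 1, 4096, 4096] mask.
batch = {
    "tokens": torch.zeros(1, 4096, dtype=torch.long),
    "labels": torch.zeros(1, 4096, dtype=torch.long),
    "loss_mask": torch.ones(1, 4096),
    "attention_mask": torch.ones(1, 1, 4096, 4096, dtype=torch.bool),
    "position_ids": torch.arange(4096).unsqueeze(0),
}
for k, v in slice_batch_for_cp(batch, cp_size=1, cp_rank=0).items():
    print("batch tensor after cp:", k, v.shape)  # unchanged shapes when cp_size == 1
```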
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:22:13] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 51.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
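Each iteration also logs "Start exporting trace N" / "Done exporting trace N". The exact mechanism is not shown in this log (the profiling-related arguments are all disabled), so the snippet below is only a generic illustration of per-iteration trace export with torch.profiler, not the script's actual code.

```python
import torch
from torch.profiler import profile, ProfilerActivity

def run_iteration(step: int):
    # Stand-in for the real forward/backward work of one training iteration.
    torch.matmul(torch.randn(512, 512), torch.randn(512, 512))

for step in range(1, 4):
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        run_iteration(step)
    print(f"Start exporting trace {step}")
    prof.export_chrome_trace(f"trace_{step}.json")   # one Chrome trace file per iteration
    print(f"Done exporting trace {step}")
```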
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:22:13] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 59.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:22:13] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 57.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
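Every iteration so far is reported as skipped ("number of skipped iterations: 1") and the loss scale halves each time, from the configured initial_loss_scale of 4294967296 down to 8388608 by iteration 10. That is the standard fp16 dynamic loss-scaling backoff when gradients overflow. The snippet below is a minimal sketch of the backoff rule only; Megatron's real scaler also grows the scale after a window of clean steps and applies hysteresis (see the loss_scale_window and hysteresis arguments in the dump later in this log).

```python
# Minimal sketch of fp16 dynamic loss scaling (backoff-on-overflow only).
class ToyLossScaler:
    def __init__(self, initial_scale=4294967296.0, backoff_factor=0.5, min_scale=1.0):
        self.scale = initial_scale
        self.backoff_factor = backoff_factor
        self.min_scale = min_scale

    def step(self, found_inf: bool) -> bool:
        """Return True if the optimizer step was skipped for this iteration."""
        if found_inf:
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            return True
        return False

scaler = ToyLossScaler()
for it in range(1, 11):
    scale_used = scaler.scale                 # value reported in the iteration line
    skipped = scaler.step(found_inf=True)     # this run overflows on every iteration
    print(f"iteration {it}: loss scale {scale_used:.1f} | skipped: {int(skipped)}")
# iteration 2 -> 2147483648.0, ..., iteration 10 -> 8388608.0, matching the log
```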
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 4096])
+batch tensor after cp: labels torch.Size([1, 4096])
+batch tensor after cp: loss_mask torch.Size([1, 4096])
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor after cp: position_ids torch.Size([1, 4096])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:22:13] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 58.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids 
torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:22:13] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 58.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:22:13 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.03165268898010254 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.031653404235839844 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.03168129920959473 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0317232608795166 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.03193092346191406 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.03362274169921875 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.03367424011230469 to prepare state dict for ckpt +Running ctx_length=8192, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 
1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... 
True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 8192 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ 
False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 
1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 8192 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 
0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 
3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... 
disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 8192 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. 
True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
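Note: the "setting number of microbatches to constant 1" line above follows directly from the logged batch settings (global_batch_size=1, micro_batch_size=1, data-parallel size=1). A minimal sketch of that arithmetic, using a hypothetical helper name rather than Megatron's own calculator:

# Hypothetical helper (not Megatron's calculator): reproduces the constant
# microbatch count from the logged batch settings.
def num_microbatches(global_batch_size: int, micro_batch_size: int, data_parallel_size: int) -> int:
    per_step = micro_batch_size * data_parallel_size
    assert global_batch_size % per_step == 0, "global batch must divide evenly"
    return global_batch_size // per_step

print(num_microbatches(global_batch_size=1, micro_batch_size=1, data_parallel_size=1))  # -> 1, as logged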
+INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.040 seconds +> compiling and loading fused kernels ... +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.656 seconds +time to initialize megatron (seconds): 7.273 +[after megatron is initialized] datetime: 2025-06-21 21:23:20 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> embedding>>> decoder + +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 103872000 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 103872000 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 103872000 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 103872000 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 103872000 > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 103872000 + +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 103872000 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 103872000 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, 
fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (103872000 elements, 103872000 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.word_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=12288, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting 
args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 
1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 12288 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 
16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... 
False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. 
True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 12288 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... 
False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... 
None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 12288 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. 
None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... 
+ wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.045 seconds +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.825 seconds +time to initialize megatron (seconds): 7.772 +[after megatron is initialized] datetime: 2025-06-21 21:24:03 +building GPT model ... 
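Note: the "> padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)" line above is consistent with padding the GPT-2 vocab up to a multiple of make_vocab_size_divisible_by (128) times the tensor-parallel size (8). A minimal sketch of that arithmetic under this assumption; the function name is illustrative, not the Megatron symbol:

# Illustrative sketch of the vocab-padding arithmetic seen in the log.
def padded_vocab_size(orig_vocab: int, make_divisible_by: int, tp_size: int) -> int:
    multiple = make_divisible_by * tp_size                    # 128 * 8 = 1024
    return ((orig_vocab + multiple - 1) // multiple) * multiple

padded = padded_vocab_size(50257, 128, 8)
print(padded, padded - 50257)  # -> 51200 943, matching the padded-vocab line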
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 120649216 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 120649216 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 120649216 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (120649216 elements, 120649216 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.word_embeddings.weight +>>> embedding +>>> decoder +>>> output_layer +>>> embedding>>> embedding + +>>> decoder>>> decoder + +>>> output_layer>>> output_layer + +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 120649216 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 120649216 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 120649216 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 120649216 +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, 
bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 120649216 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.24, 3.35) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:24:04 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
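Note: the per-rank parameter counts above (120649216 here, 103872000 in the earlier 8192-token run) can be reproduced from the logged hyperparameters. The sketch below is a back-of-envelope reconstruction, assuming word embeddings and linear layers are split across the 8 tensor-parallel ranks while learned position embeddings, LayerNorms, and row-parallel output biases are replicated; it is not Megatron code.

# Back-of-envelope check of the logged per-rank parameter counts.
def params_per_tp_rank(hidden=4096, ffn=16384, layers=2, heads=64, kv_channels=64,
                       query_groups=16, padded_vocab=51200, max_pos=12288, tp=8):
    word_emb = padded_vocab // tp * hidden              # word embeddings, TP-split
    pos_emb = max_pos * hidden                          # learned absolute positions, replicated
    qkv_out = (heads + 2 * query_groups) * kv_channels  # Q plus grouped K/V projections
    per_layer = (
        2 * hidden                                      # pre-attention LayerNorm (weight + bias)
        + qkv_out // tp * hidden + qkv_out // tp        # QKV weight + bias (column-parallel)
        + hidden * hidden // tp + hidden                # attention output proj + full bias
        + 2 * hidden                                    # pre-MLP LayerNorm
        + hidden * ffn // tp + ffn // tp                # MLP fc1 + bias (column-parallel)
        + ffn * hidden // tp + hidden                   # MLP fc2 + full bias
    )
    return word_emb + pos_emb + layers * per_layer + 2 * hidden   # + final LayerNorm

print(params_per_tp_rank())               # -> 120649216 (seq_length 12288 run)
print(params_per_tp_rank(max_pos=8192))   # -> 103872000 (seq_length 8192 run)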
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=12288, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005659 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5549 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001729 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5546 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001542 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5557 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:24:04 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (691.29, 710.57) + train/valid/test-data-iterators-setup ..........: (31.23, 139.12) +training ... +Setting rerun_state_machine.current_iteration to 0... 
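Note: the split_matrix printed above follows from normalizing the "1,1,1" split string into cumulative train/valid/test boundaries. An illustrative reconstruction (hypothetical helper, not the Megatron implementation):

# Hypothetical helper reconstructing the logged split_matrix from the "1,1,1" split string.
def split_matrix(split: str):
    weights = [float(w) for w in split.split(",")]
    total, cum, prev, bounds = sum(weights), 0.0, 0.0, []
    for w in weights:
        cum += w
        bounds.append((prev, cum / total))
        prev = cum / total
    return bounds

print(split_matrix("1,1,1"))
# -> [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]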
+[before the start of training step] datetime: 2025-06-21 21:24:04 +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 
12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +Start exporting trace 0 +Done exporting trace 0 + [2025-06-21 21:24:13] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 8545.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.0703 +Theoretical memory footprints: weight and optimizer=1206.09 MB +[Rank 4] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3330.0 | max reserved: 3330.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3286.0 | max reserved: 3286.0 +[Rank 6] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3234.0 | max reserved: 3234.0 +[Rank 1] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3332.0 | max reserved: 3332.0 +[Rank 0] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3330.0 | max reserved: 3330.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3234.0 | max reserved: 3234.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3186.0 | max reserved: 3186.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 1663.05419921875 | max allocated: 3036.47900390625 | reserved: 3350.0 | max reserved: 3350.0 +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch 
tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +Start exporting trace 1 
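Note: iteration 1 reports "number of skipped iterations: 1" at loss scale 4294967296.0, i.e. the initial_loss_scale of 2^32 overflowed in fp16 and the scale is being backed off. The sketch below shows the generic dynamic loss-scaling idea, not Megatron's exact grad scaler (its hysteresis and window handling may differ); the halving-on-overflow behaviour is consistent with the scales logged for the following iterations (2147483648.0, then 1073741824.0).

# Generic dynamic loss-scaling sketch (assumption: simplified, not Megatron's GradScaler).
class SimpleLossScaler:
    def __init__(self, initial_scale=4294967296.0, window=1000, min_scale=1.0):
        self.scale, self.window, self.min_scale = initial_scale, window, min_scale
        self.good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Returns True if the optimizer step should be skipped."""
        if found_overflow:
            self.scale = max(self.scale / 2.0, self.min_scale)   # back off on inf/nan grads
            self.good_steps = 0
            return True
        self.good_steps += 1
        if self.good_steps % self.window == 0:                   # grow back after a clean window
            self.scale *= 2.0
        return False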
+ [2025-06-21 21:24:13] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 396.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:24:13] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 384.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:24:14] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 382.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:24:14] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 362.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:24:15] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 387.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:24:15] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 368.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:24:15] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 360.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:24:16] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 366.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
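Note: from iteration 2 through iteration 9 the reported loss scale halves on every step (2147483648.0 down to 16777216.0) while each step is counted as skipped, which is the standard dynamic loss-scaling backoff after an fp16 overflow. A minimal sketch of that arithmetic, assuming one overflow per reported iteration; this is a generic illustration, not Megatron's exact scaler.

    # Generic dynamic loss-scaling backoff (illustrative only).
    scale = 2147483648.0     # loss scale reported at iteration 2
    for _ in range(7):       # one halving per skipped iteration up to iteration 9
        scale /= 2.0
    print(scale)             # 16777216.0, the loss scale reported at iteration 9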
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:24:16] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 387.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:24:16
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.029001235961914062 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.029007911682128906 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.028999805450439453 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.029047012329101562 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.029016733169555664 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.029080867767333984 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0293581485748291 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.029348373413085938 to prepare state dict for ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3410427570343018
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3411457538604736
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.341339349746704
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3410890102386475
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.341590404510498
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3414371013641357
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.008194923400878906
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3437247276306152
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.012703657150268555
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.012704849243164062
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.011048078536987305
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.011292695999145508
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.01095890998840332
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.771169
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.7711692
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.7711723
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.7711828
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.771184
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.940696716308594e-05
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.393692016601562e-05
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.703636169433594e-05
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011110305786132812
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011467933654785156
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.010844707489013672
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.7714128
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011396408081054688
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.0034580230712890625
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.7717767
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00018739700317382812
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.014481544494628906
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541058.775936
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.0001010894775390625
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0504610538482666
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8221047 rank: 7, write(async) time: 0.050934791564941406
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05179119110107422
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8236988 rank: 3, write(async) time: 0.05228304862976074
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.052942514419555664
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.82462 rank: 4, write(async) time: 0.0534367561340332
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05347013473510742
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05349922180175781
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.825176 rank: 2, write(async) time: 0.05398750305175781
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8251984 rank: 6, write(async) time: 0.05402636528015137
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.054729461669921875
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8263783 rank: 1, write(async) time: 0.055207252502441406
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05202889442443848
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8284345 rank: 0, write(async) time: 0.05249929428100586
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06377935409545898
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541058.8361282 rank: 5, write(async) time: 0.06434893608093262
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.5974044799804688e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 3.337860107421875e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7404556274414062e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.6689300537109375e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.71661376953125e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.811981201171875e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.8358230590820312e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.03881502151489258 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03330874443054199 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.026270627975463867 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.026842594146728516 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.025822162628173828 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.026149749755859375 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.02762007713317871 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.3589859008789062e-05 to finish D2H
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.031705379486083984 to schedule async ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214589440, before: 1604411392, after: 1819000832
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214532096, before: 1608761344, after: 1823293440
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 215064576, before: 1617080320, after: 1832144896
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214519808, before: 1618513920, after: 1833033728
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214650880, before: 1617080320, after: 1831731200
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214560768, before: 1605623808, after: 1820184576
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216526848, before: 1618513920, after: 1835040768
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216596480, before: 1604411392, after: 1821007872
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216674304, before: 1608761344, after: 1825435648
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214683648, before: 1611284480, after: 1825968128
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216285184, before: 1605623808, after: 1821908992
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.6831465, rank: 2, write(sync,parallel): 0.6176149845123291
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216547328, before: 1611284480, after: 1827831808
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216375296, before: 1639890944, after: 1856266240
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214650880, before: 1639890944, after: 1854541824
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.6954646, rank: 7, write(sync,parallel): 0.6411163806915283
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.7044365, rank: 5, write(sync,parallel): 0.651430606842041
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.7103128, rank: 6, write(sync,parallel): 0.665940523147583
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.68s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.7193556, rank: 3, write(sync,parallel): 0.6523566246032715
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.71s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.7404575, rank: 4, write(sync,parallel): 0.6741507053375244
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.73s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.75s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.72s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541059.7567112, rank: 1, write(sync,parallel): 0.6954383850097656
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.74s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.77s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 367415296, before: 1876226048, after: 2243641344
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 366145536, before: 1876226048, after: 2242371584
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541060.0713515, rank: 0, write(sync,parallel): 0.8054764270782471
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.87s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0991924, 1, gather: 0.3002164363861084
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0992687, 2, gather: 0.37886524200439453
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0993369, 3, gather: 0.34513163566589355
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.099364, 6, gather: 0.3462851047515869
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0994534, 7, gather: 0.3689446449279785
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0995216, 4, gather: 0.3229236602783203
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.0998678, 5, gather: 0.3493316173553467
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.1016915, 0, gather: 0.004912614822387695
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541060.111852, metadata_write: 0.010027647018432617
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0174s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3833s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3153s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3934s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3597s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3612s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3374s
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3634s
+ successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/8, p 1/1 ]
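Note: the DEBUG timings above break the torch_dist save into the phases of an asynchronous checkpoint: a device-to-host copy ("D2H and push"), an async write handed to a forked worker process, the trainer joining that process, and a final gather plus metadata write on rank 0. Below is a minimal sketch of the copy-then-background-write pattern those timings reflect, assuming a plain dict of tensors; it is an illustration only, not the megatron.core.dist_checkpointing API.

    import multiprocessing as mp
    import torch

    def _write_worker(cpu_state, path):
        # Runs in the forked process, off the training critical path.
        torch.save(cpu_state, path)

    def async_save(state_dict, path):
        # D2H: copy GPU tensors to CPU so training can continue immediately.
        cpu_state = {k: v.detach().cpu() if torch.is_tensor(v) else v
                     for k, v in state_dict.items()}
        proc = mp.Process(target=_write_worker, args=(cpu_state, path))
        proc.start()   # schedule the background write
        return proc    # caller joins later to finalize the checkpoint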
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3634s
+ successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/8, p 1/1 ]
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.002282857894897461 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.00226593017578125 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002256155014038086 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0022783279418945312 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.002254962921142578 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002267599105834961 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002263784408569336 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0022020339965820312 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+Start exporting trace 10
+Done exporting trace 10
+(min, max) time across ranks (ms):
+ evaluate .......................................: (1976.76, 1985.40)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.254503E+01 | lm loss PPL: 2.806974E+05 |
+----------------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch tensor after cp: tokens torch.Size([1, 12288])
+batch tensor after cp: labels torch.Size([1, 12288])
+batch tensor after cp: loss_mask torch.Size([1, 12288])
+batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor after cp: position_ids torch.Size([1, 12288])
+batch tensor: tokens torch.Size([1, 12288])
+batch tensor: labels torch.Size([1, 12288])
+batch tensor: loss_mask torch.Size([1, 12288])
+batch tensor: attention_mask torch.Size([1, 1, 12288, 12288])
+batch tensor: position_ids torch.Size([1, 12288])
+batch
tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: position_ids torch.Size([1, 12288]) +batch tensor: tokens torch.Size([1, 12288]) +batch tensor: labels torch.Size([1, 12288]) +batch tensor: loss_mask torch.Size([1, 12288]) +batch tensor: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor: position_ids torch.Size([1, 12288]) +batch tensor after cp: tokens torch.Size([1, 12288]) +batch tensor after cp: labels torch.Size([1, 12288]) +batch tensor after cp: loss_mask torch.Size([1, 12288]) +batch tensor after cp: attention_mask torch.Size([1, 1, 12288, 12288]) +batch tensor after cp: 
position_ids torch.Size([1, 12288]) +Start exporting trace 11 +Done exporting trace 11 +(min, max) time across ranks (ms): + evaluate .......................................: (340.15, 351.39) +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.254503E+01 | lm loss PPL: 2.806974E+05 | +---------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Running ctx_length=16384, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 16384 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. 
True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 
0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 16384 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 
0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. 
False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 16384 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... 
None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ 
False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 
1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 16384 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. 
False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.057 seconds +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.554 seconds +time to initialize megatron (seconds): 7.947 +[after megatron is initialized] datetime: 2025-06-21 21:25:01 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (137426432 elements, 137426432 padded size): + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + 
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.embedding.position_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (2.93, 3.05)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:25:02
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=16384, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.010458 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4162
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001818 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4160
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001534 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4167
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 21:25:02
+done with setup ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (844.47, 855.51)
+ train/valid/test-data-iterators-setup ..........: (28.05, 121.88)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
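The batch dumps that follow report a full attention_mask of shape [1, 1, 16384, 16384] (create_attention_mask_in_dataloader is True in the arguments above). A rough, hedged estimate of what a single such mask costs, assuming it is materialized as a boolean tensor (one byte per element; the dtype is an assumption here, not confirmed by the log), is:

    # Back-of-the-envelope size of the [1, 1, 16384, 16384] attention mask printed below.
    seq_len = 16384                               # seq_length / max_position_embeddings above
    mask_elements = 1 * 1 * seq_len * seq_len     # 268,435,456 elements
    mask_mib = mask_elements * 1 / 2**20          # assuming 1 byte per element (torch.bool)
    print(f"attention_mask per sample: {mask_mib:.0f} MiB")   # -> 256 MiB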
+[before the start of training step] datetime: 2025-06-21 21:25:02 +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 
16384])
+batch tensor: position_ids torch.Size([1, 16384])
+batch tensor after cp: tokens torch.Size([1, 16384])
+batch tensor after cp: labels torch.Size([1, 16384])
+batch tensor after cp: loss_mask torch.Size([1, 16384])
+batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor after cp: position_ids torch.Size([1, 16384])
+batch tensor: tokens torch.Size([1, 16384])
+batch tensor: labels torch.Size([1, 16384])
+batch tensor: loss_mask torch.Size([1, 16384])
+batch tensor: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([1, 16384])
+batch tensor after cp: tokens torch.Size([1, 16384])
+batch tensor after cp: labels torch.Size([1, 16384])
+batch tensor after cp: loss_mask torch.Size([1, 16384])
+batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor after cp: position_ids torch.Size([1, 16384])
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+ [2025-06-21 21:25:10] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 7959.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.0703
+Theoretical memory footprints: weight and optimizer=1206.09 MB
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4256.0 | max reserved: 4256.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 1967.16357421875 | max allocated: 3907.14306640625 | reserved: 4358.0 | max reserved: 4358.0
+batch tensor: tokens torch.Size([1, 16384])
+batch tensor: labels torch.Size([1, 16384])
+batch tensor: loss_mask torch.Size([1, 16384])
+batch tensor: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([1, 16384])
+batch tensor after cp: tokens torch.Size([1, 16384])
+batch tensor after cp: labels torch.Size([1, 16384])
+batch tensor after cp: loss_mask torch.Size([1, 16384])
+batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor after cp: position_ids torch.Size([1, 16384])
+batch tensor: tokens torch.Size([1, 16384])
+batch tensor: labels torch.Size([1, 16384])
+batch tensor: loss_mask torch.Size([1, 16384])
+batch tensor: attention_mask torch.Size([1, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([1, 16384])
+batch tensor after cp: tokens torch.Size([1, 16384])
+batch
tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 1 
+Done exporting trace 1 + [2025-06-21 21:25:10] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 665.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids 
torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:25:11] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 602.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) 
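The repeated "batch tensor" / "batch tensor after cp" pairs in these iterations are per-rank debug prints of the micro-batch shapes before and after context-parallel slicing; with context-parallel size 1 in this run, the "after cp" shapes are unchanged. A minimal sketch of such a debug helper, assuming a dict of tensors like the one a GPT data iterator yields (the helper name and its slicing logic are illustrative, not Megatron's actual get_batch):

import torch

def debug_batch_shapes(batch, cp_size=1, cp_rank=0):
    # Print shapes before context-parallel (CP) slicing, mirroring the log format.
    for key, val in batch.items():
        print(f"batch tensor: {key} {val.shape}")
    if cp_size > 1:
        # Illustrative slicing of the sequence dimension across CP ranks;
        # attention_mask is [b, 1, s, s], the other tensors are [b, s].
        sliced = {}
        for key, val in batch.items():
            dim = 2 if key == "attention_mask" else 1
            chunk = val.shape[dim] // cp_size
            sliced[key] = val.narrow(dim, cp_rank * chunk, chunk)
        batch = sliced
    for key, val in batch.items():
        print(f"batch tensor after cp: {key} {val.shape}")
    return batch

# Shapes matching this log (seq_length 16384, micro-batch 1); cp_size=1 leaves them unchanged.
# Note: the full 16384x16384 bool mask is ~256 MB; shrink seq to test cheaply.
seq = 16384
batch = {
    "tokens": torch.zeros(1, seq, dtype=torch.long),
    "labels": torch.zeros(1, seq, dtype=torch.long),
    "loss_mask": torch.ones(1, seq),
    "attention_mask": torch.ones(1, 1, seq, seq, dtype=torch.bool),
    "position_ids": torch.arange(seq).unsqueeze(0),
}
debug_batch_shapes(batch, cp_size=1)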
+batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:25:12] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 601.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: 
attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:25:12] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 588.2 |
learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask 
torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:25:13] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 603.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels 
torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:25:13] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 612.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens 
torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:25:14] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 630.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan 
iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 
16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:25:15] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 595.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch 
tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:25:15] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 586.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:25:15 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.027995586395263672 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.02806997299194336 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.028158187866210938 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.02820563316345215 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02822399139404297 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.02838873863220215 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.028420209884643555 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.028934001922607422 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
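Every iteration summary above reports "number of skipped iterations: 1", and the loss scale halves step by step, from 4294967296.0 (2^32) at iteration 1 to 8388608.0 (2^23) at iteration 10: the standard fp16 dynamic-loss-scaling backoff after a gradient overflow. A sketch of that arithmetic (an assumed halve-on-overflow rule for illustration; Megatron's scaler also regrows the scale after enough overflow-free steps):

# Reproduce the loss-scale column of the iteration log above, assuming every
# step overflows (so the optimizer step is skipped and the scale is halved).
scale = 4294967296.0  # 2**32, the initial dynamic loss scale
for it in range(1, 11):
    print(f"iteration {it:2d} | loss scale: {scale}")
    overflow = True      # all 10 iterations in this run were skipped
    if overflow:
        scale /= 2.0     # backoff factor of 2
# iteration 10 prints 8388608.0, matching the log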
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3609187602996826 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.360837697982788 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3605103492736816 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.361487627029419 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.361478328704834 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.361006259918213 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3616158962249756 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.007311820983886719 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.006811618804931641 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.006407022476196289 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.005476713180541992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.005764484405517578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0144548 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0144596 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.0051496028900146484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0144672 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.005112886428833008 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0144913 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0145032 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0145276 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.0046613216400146484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.0001068115234375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010967254638671875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010657310485839844 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010061264038085938 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.014587 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.0001049041748046875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010442733764648438 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011229515075683594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.0071980953216552734 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541118.0193224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.989738464355469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04674887657165527 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0616703 rank: 2, write(async) time: 0.04717707633972168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04713249206542969 
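The filesystem_async lines break each rank's save into phases: planning, bucketing the tensors, a device-to-host copy ("D2H and push"), and an asynchronous write handed off to a forked worker process. A generic sketch of that stage-to-host-then-write-asynchronously pattern (plain multiprocessing and torch.save, for illustration only; this is not the megatron.core FileSystemWriterAsync implementation):

import time
import multiprocessing as mp
import torch

def _write_worker(path, cpu_state):
    # Runs in the forked child; the parent can keep training while this proceeds.
    torch.save(cpu_state, path)

def async_save(path, state):
    t0 = time.time()
    # Stage GPU tensors to host memory first ("D2H and push" in the log above).
    cpu_state = {k: v.detach().to("cpu") for k, v in state.items()}
    print(f"D2H and push, time: {time.time() - t0}")
    proc = mp.get_context("fork").Process(target=_write_worker, args=(path, cpu_state))
    proc.start()
    return proc  # the caller joins it later, like the "joining self.process" lines below

# Example: writer = async_save("shard_rank0.pt", {"weight": torch.randn(4, 4)}); writer.join()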
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0471799373626709 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0620549 rank: 5, write(async) time: 0.04755282402038574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0620759 rank: 6, write(async) time: 0.04761457443237305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.047864437103271484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0627866 rank: 1, write(async) time: 0.04831695556640625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04804420471191406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0629566 rank: 4, write(async) time: 0.04849982261657715 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04928016662597656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0642858 rank: 3, write(async) time: 0.049759626388549805 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04973006248474121 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.0647817 rank: 7, write(async) time: 0.050196170806884766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04916191101074219 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541118.068946 rank: 0, write(async) time: 0.04962277412414551 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.02646470069885254 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.02822136878967285 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.028650283813476562 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.029317617416381836 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.027907609939575195 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.026160478591918945 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.02629232406616211 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 
started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.4543533325195312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.03190279006958008 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214495232, before: 1619386368, after: 1833881600 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216506368, before: 1619386368, after: 1835892736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214495232, before: 1608941568, after: 1823436800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214724608, before: 1643614208, after: 1858338816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216530944, before: 1654988800, after: 1871519744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214515712, before: 1654988800, after: 1869504512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216498176, before: 1608941568, after: 1825439744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214638592, before: 1625456640, after: 1840095232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214470656, before: 1605668864, after: 1820139520 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216731648, before: 1643614208, after: 1860345856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214728704, before: 1602146304, after: 1816875008 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9016016, rank: 5, write(sync,parallel): 0.620849609375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216735744, before: 1605668864, after: 1822404608 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9225638, rank: 4, write(sync,parallel): 0.6367740631103516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216174592, before: 1625456640, after: 1841631232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216567808, before: 1602146304, after: 1818714112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9295206, rank: 3, write(sync,parallel): 0.6459524631500244 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.69s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9499516, rank: 6, write(sync,parallel): 0.6664471626281738 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.70s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
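The "consumed / before / after" numbers are each writer's process-memory reading in bytes taken around its write (consumed = after - before), i.e. roughly 200-400 MB of extra host memory per worker while serializing its shard. A minimal sketch of that kind of accounting, assuming psutil and a resident-set-size reading (the values in this log come from Megatron's own instrumentation, not from this snippet):

import os
import psutil

def write_with_rss_accounting(write_fn, worker_id=0):
    # Report resident memory before and after a write, in bytes, like the log above.
    rss = lambda: psutil.Process(os.getpid()).memory_info().rss
    before = rss()
    write_fn()
    after = rss()
    print(f"{worker_id} consumed: {after - before}, before: {before}, after: {after}")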
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9683664, rank: 1, write(sync,parallel): 0.6794097423553467 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9701414, rank: 2, write(sync,parallel): 0.6854557991027832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541118.9713268, rank: 7, write(sync,parallel): 0.6861369609832764 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.73s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.74s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.75s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.76s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.76s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 416464896, before: 1876283392, after: 2292748288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 417755136, before: 1876283392, after: 2294038528 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541119.5079103, rank: 0, write(sync,parallel): 0.9732847213745117 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.04s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5399458, 1, gather: 0.5312187671661377 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5400856, 4, gather: 0.5789551734924316 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.540137, 3, gather: 0.5628156661987305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5401423, 2, gather: 0.5258159637451172 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.540256, 6, gather: 0.5470128059387207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.540291, 5, gather: 0.601064920425415 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5405338, 7, gather: 0.525505781173706 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5425248, 0, gather: 0.004984378814697266 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541119.5562375, metadata_write: 0.01356196403503418 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0215s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5650s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5974s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5442s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5810s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5437s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5499s 
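The D2H, fork, and join messages above follow the usual asynchronous-checkpoint pattern: snapshot the GPU state dict into host memory, hand the copy to a background process that performs the slow filesystem write, and join that process later so the training loop only blocks for the copy and the fork. A minimal, generic sketch of that pattern (not Megatron's actual FileSystemWriterAsync interface) might look like:

    import multiprocessing as mp
    import torch

    def _write_worker(cpu_state_dict, path):
        # The slow filesystem write happens in a separate process.
        torch.save(cpu_state_dict, path)

    def async_save(state_dict, path):
        # D2H copy first (blocking but cheap), as in the "to finish D2H" line above.
        cpu_state = {k: v.detach().cpu() if torch.is_tensor(v) else v
                     for k, v in state_dict.items()}
        # Fork a writer process and return immediately; the caller joins it later,
        # mirroring the "joining self.process" messages above.
        proc = mp.get_context("fork").Process(target=_write_worker, args=(cpu_state, path))
        proc.start()
        return proc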
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6191s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/8, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0026404857635498047 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002609729766845703 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0026128292083740234 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0025959014892578125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0025815963745117188 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0026092529296875 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.00260162353515625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002606630325317383 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after 
cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +Start exporting trace 10 +Done exporting trace 10 +(min, max) time across ranks (ms): + evaluate .......................................: (2025.49, 2035.90) +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on validation set | lm loss value: 1.228851E+01 | lm loss PPL: 2.171866E+05 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch 
tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: 
position_ids torch.Size([1, 16384])
+Start exporting trace 11
+Done exporting trace 11
+(min, max) time across ranks (ms):
+ evaluate .......................................: (616.24, 622.01)
+----------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on test set | lm loss value: 1.228851E+01 | lm loss PPL: 2.171866E+05 |
+----------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Running ctx_length=24576, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 24576
+TP_SIZE: 8
+CP_SIZE: 1
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
+ bf16 ............................................ False
+ bias_dropout_fusion ............................. True
+ bias_gelu_fusion ................................ True
+ bias_swiglu_fusion .............................. 
True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 
0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 24576 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 
0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. 
False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 24576 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... 
None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ 
False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 
1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 24576 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. 
False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +INFO:megatron.training.initialize:Setting logging level to 0 +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. 
Compilation time: 0.042 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.583 seconds +time to initialize megatron (seconds): 7.639 +[after megatron is initialized] datetime: 2025-06-21 21:26:00 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 170980864 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 170980864 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 170980864 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 170980864 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 170980864 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 170980864 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 170980864 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (170980864 elements, 170980864 padded size): + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + 
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.word_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 170980864 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (209.74, 209.93) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:26:02 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
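The split_matrix reported above is simply the "1,1,1" split string normalized into cumulative (start, end) fractions over the mock corpus. A simplified reconstruction (the actual builder also handles None entries and rounding) is:

    # Turn a "1,1,1" split into the [(0, 1/3), (1/3, 2/3), (2/3, 1.0)] boundaries logged above.
    weights = [float(w) for w in "1,1,1".split(",")]
    total = sum(weights)
    bounds, start = [], 0.0
    for w in weights:
        end = start + w / total
        bounds.append((start, end))
        start = end
    print(bounds)  # [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)]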
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=24576, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004965 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2774 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001819 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2773 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001509 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2778 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:26:02 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1731.97, 1744.41) + train/valid/test-data-iterators-setup ..........: (29.24, 122.22) +training ... +Setting rerun_state_machine.current_iteration to 0... 
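One reason these runs get memory-hungry at longer contexts: with create_attention_mask_in_dataloader set to True, every rank carries a full seq_length x seq_length attention mask like the torch.Size([1, 1, 24576, 24576]) tensors printed below. Assuming the mask is stored as torch.bool (one byte per element, which is an assumption, not stated in the log), its size alone is:

    seq_len = 24576
    mask_bytes = 1 * 1 * seq_len * seq_len   # [1, 1, 24576, 24576] bool mask, 1 byte per element
    print(f"{mask_bytes / 2**20:.0f} MiB")   # 576 MiB per mask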
+[before the start of training step] datetime: 2025-06-21 21:26:02 +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 
24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 0 +Done exporting trace 0 + [2025-06-21 21:26:14] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 11885.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.0703 +Theoretical memory footprints: weight and optimizer=1206.09 MB +[Rank 6] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +[Rank 1] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0[Rank 0] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6192.0 | max reserved: 6192.0 + +[Rank 3] (after 1 iterations) memory (MB) | allocated: 2671.38232421875 | max allocated: 5752.47119140625 | reserved: 6384.0 | max reserved: 6384.0 +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch 
tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 1 
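Note: the "Theoretical memory footprints: weight and optimizer=1206.09 MB" figure reported after iteration 1 above is consistent with roughly 18 bytes per parameter for the most-loaded shard (0.0703 B parameters). The 18-byte accounting (fp16 weight + fp32 gradient + fp32 master weight + two fp32 Adam moments) is an assumption used only for this cross-check, not something stated in the log; a minimal sketch:

    # Hedged cross-check of the reported theoretical footprint; the bytes/param
    # breakdown is assumed (2 fp16 weight + 4 fp32 grad + 4 fp32 master weight
    # + 4 + 4 fp32 Adam moments), the parameter count is taken from the log.
    params_most_loaded_shard = 0.0703e9   # "Number of parameters in most loaded shard in billions: 0.0703"
    bytes_per_param = 18                  # assumed mixed-precision Adam accounting
    print(params_most_loaded_shard * bytes_per_param / 2**20)
    # ~1206.8 MB, close to the logged "weight and optimizer=1206.09 MB";
    # the small gap comes from the shard size being rounded to 0.0703 B.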
+Done exporting trace 1 + [2025-06-21 21:26:15] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 1429.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids 
torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:26:16] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 1408.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 
24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:26:18] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 1395.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: 
attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:26:19] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 1415.6 
| learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask 
torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:26:21] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 1397.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels 
torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:26:22] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 1398.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens 
torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:26:23] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 1392.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan 
iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 
24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:26:25] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 1388.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch 
tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +batch tensor: tokens torch.Size([1, 24576]) +batch tensor: labels torch.Size([1, 24576]) +batch tensor: loss_mask torch.Size([1, 24576]) +batch tensor: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor: position_ids torch.Size([1, 24576]) +batch tensor after cp: tokens torch.Size([1, 24576]) +batch tensor after cp: labels torch.Size([1, 24576]) +batch tensor after cp: loss_mask torch.Size([1, 24576]) +batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 24576]) +batch tensor after cp: position_ids torch.Size([1, 24576]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:26:26] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 1405.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:26:26 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.04160284996032715 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.041614532470703125 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0416107177734375 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.046037912368774414 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.04607844352722168 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.04708385467529297 to prepare state dict for ckpt +WARNING:megatron.core.dist_checkpointing.serialization:Overwriting old incomplete / corrupted checkpoint... 
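Note: across the ten iteration lines above the learning rate stays at 0 and every step reports "number of skipped iterations: 1", while the loss scale halves from 4294967296 (2^32) at iteration 1 to 8388608 (2^23) at iteration 10. That is the expected backoff of dynamic fp16 loss scaling when every step overflows. A minimal, generic sketch of that backoff follows; it is illustrative only, not Megatron's actual scaler, and the growth settings are assumptions:

    # Generic dynamic loss scaler: halve on overflow, grow after a run of good steps.
    class DynamicLossScaler:
        def __init__(self, init_scale=2.0**32, backoff=0.5, growth=2.0, growth_interval=1000):
            self.scale = init_scale
            self.backoff = backoff
            self.growth = growth
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow):
            if found_overflow:
                self.scale *= self.backoff     # overflow: skip the step, shrink the scale
                self._good_steps = 0
            else:
                self._good_steps += 1
                if self._good_steps % self.growth_interval == 0:
                    self.scale *= self.growth  # long overflow-free run: grow the scale back

    scaler = DynamicLossScaler()
    for _ in range(9):                         # nine more overflowing updates after iteration 1
        scaler.update(found_overflow=True)
    print(scaler.scale)                        # 8388608.0 == 2**23, the scale logged at iteration 10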
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0541529655456543 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.05711245536804199 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.289097547531128 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.290191173553467 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.290313959121704 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.2905189990997314 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.290249824523926 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.290456771850586 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 5.291146755218506 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.007425069808959961 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded 
plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.007442951202392578 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.007016658782958984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.007693290710449219 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.007966279983520508 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.007060050964355469 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3079562 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3079598 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3079646 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3079617 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3079686 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.005417346954345703 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.003765106201171875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.0001010894775390625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010561943054199219 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.417533874511719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010609626770019531 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010466575622558594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.308061 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3080652 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00012040138244628906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011992454528808594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.00754094123840332 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541193.3130016 
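Note: the DEBUG lines around this point trace an asynchronous, fully parallel checkpoint write: each rank plans its shard, stages tensors device-to-host ("D2H and push"), schedules the actual file write in worker processes ("write(async)", then "0 started"/"1 started"), and later joins them ("joining self.process"). Below is a minimal sketch of that stage-then-write-in-background pattern, under the simplifying assumption that the write can be handed to a single child process; it is an illustration of the idea, not Megatron's FileSystemWriterAsync:

    import os
    import tempfile
    import torch
    import torch.multiprocessing as mp

    def _write_worker(cpu_state, path):
        # Child process: the blocking file write happens here, off the training process.
        torch.save(cpu_state, path)

    def async_save(state_dict, path):
        # Stage 1 ("D2H and push"): copy tensors to host so the device copies are free again.
        cpu_state = {k: (v.detach().cpu() if torch.is_tensor(v) else v)
                     for k, v in state_dict.items()}
        # Stage 2 ("write(async)"): schedule the write in a worker process and return.
        proc = mp.get_context("spawn").Process(target=_write_worker, args=(cpu_state, path))
        proc.start()
        return proc  # the caller joins it later, as in the "joining self.process" lines

    if __name__ == "__main__":
        demo_state = {"weight": torch.randn(4, 4)}
        p = async_save(demo_state, os.path.join(tempfile.gettempdir(), "ckpt_demo.pt"))
        p.join()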
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010418891906738281 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04463553428649902 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.353055 rank: 4, write(async) time: 0.0450894832611084 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.046289920806884766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.354696 rank: 2, write(async) time: 0.0467371940612793 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.046884775161743164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.3553097 rank: 6, write(async) time: 0.047347068786621094 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.047051191329956055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.3554707 rank: 5, write(async) time: 0.04751157760620117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04731941223144531 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.3557703 rank: 7, write(async) time: 0.04780983924865723 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05158090591430664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.360121 rank: 1, write(async) time: 0.052060842514038086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0474855899810791 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.3609312 rank: 0, write(async) time: 0.04793047904968262 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05866694450378418 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541193.36723 rank: 3, write(async) time: 0.05916285514831543 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.02710580825805664 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.030019521713256836 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.024190902709960938 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.024416685104370117 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.02415323257446289 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 
0.03634357452392578 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.02682948112487793 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
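Note: in the "consumed / before / after" figures that follow, the three numbers are internally consistent with consumed = after − before; interpreting them as per-writer memory in bytes is an assumption. Checking the first pair from the log:

    # Values copied from the first "1 consumed" line below.
    before, after = 1593970688, 1810673664
    print(after - before)  # 216702976, exactly the logged "consumed" value (~207 MiB)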
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216702976, before: 1593970688, after: 1810673664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214679552, before: 1603723264, after: 1818402816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216461312, before: 1615781888, after: 1832243200 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216162304, before: 1603723264, after: 1819885568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214491136, before: 1601064960, after: 1815556096 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214626304, before: 1618145280, after: 1832771584 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.4442146, rank: 6, write(sync,parallel): 0.7427222728729248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216498176, before: 1601064960, after: 1817563136 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216567808, before: 1620668416, after: 1837236224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216481792, before: 1618145280, after: 1834627072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214466560, before: 1621331968, after: 1835798528 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 2.193450927734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214462464, before: 1615777792, after: 1830240256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214765568, before: 1620668416, after: 1835433984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.496626, rank: 2, write(sync,parallel): 0.795135498046875 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.0330805778503418 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214519808, before: 1593970688, after: 1808490496 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.505365, rank: 3, write(sync,parallel): 0.802776575088501 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 
0.83s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.519352, rank: 7, write(sync,parallel): 0.834951639175415 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.5308878, rank: 1, write(sync,parallel): 0.8299825191497803 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.86s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.88s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.5464394, rank: 4, write(sync,parallel): 0.8445541858673096 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216584192, before: 1621331968, after: 1837916160 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.90s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.91s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.90s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541194.599723, rank: 5, write(sync,parallel): 0.8935947418212891 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.97s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 516882432, before: 1891807232, after: 2408689664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 518422528, before: 1891807232, after: 2410229760 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541196.3316915, rank: 0, write(sync,parallel): 1.8243696689605713 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.90s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.375567, 2, gather: 1.8422553539276123 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.3756928, 4, gather: 1.797248125076294 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.3757331, 6, gather: 1.8680410385131836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.3758368, 7, gather: 1.8220148086547852 
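The payoff of that split is visible in the timings: write(async), the only part that blocks the training loop, takes roughly 45-59 ms per rank, while the background write(sync,parallel) phase runs for 0.74-1.82 s and is only joined afterwards. A quick check with the rank-0 numbers copied from the log above:

```python
# Rank-0 timings copied from the DEBUG lines above.
blocking_s = 0.0479    # write(async): D2H copy + scheduling on the training process
background_s = 1.8244  # write(sync,parallel): file write inside the forked workers
print(background_s / blocking_s)  # ~38: the write is ~38x longer than training was blocked
```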
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.3757129, 1, gather: 1.8027369976043701 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.37581, 3, gather: 1.828871250152588 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.375838, 5, gather: 1.7337520122528076 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541196.3777394, 0, gather: 0.004659175872802734 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0075s +Running ctx_length=32768, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 32768 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ 
False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... 
False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 32768 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. 
None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... 
False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 32768 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... 
None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ 
False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 
16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 32768 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ 
False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +INFO:megatron.training.initialize:Setting logging level to 0 +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.042 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.425 seconds +time to initialize megatron (seconds): 7.306 +[after megatron is initialized] datetime: 2025-06-21 21:27:45 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 204535296 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 204535296 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 204535296 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 204535296 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 204535296 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 204535296 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 204535296 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 204535296 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (204535296 elements, 204535296 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + 
module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (4.71, 4.89) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:27:47 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
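The per-rank parameter count reported above, 204,535,296 on every (tensor, pipeline) rank, can be reproduced from the run's arguments: with tensor_model_parallel_size=8 the vocab-parallel word embeddings and the transformer weights are split eight ways, the learned absolute position embeddings (32768 x 4096) are replicated on every rank, and the output layer adds nothing because untie_embeddings_and_output_weights is False. A back-of-the-envelope check, as a sketch assuming Megatron's usual sharding of these layers:

```python
# Back-of-the-envelope check of the 204,535,296 parameters reported per TP rank,
# using the argument values from this run (assumes vocab-parallel word embeddings,
# replicated position embeddings, column/row-parallel attention and MLP weights).
tp, hidden, ffn, layers = 8, 4096, 16384, 2
vocab, seq = 51200, 32768                      # padded vocab, max_position_embeddings
heads, groups, kv_ch = 64, 16, 64              # GQA: 64 heads, 16 query groups

word_emb = vocab // tp * hidden                # 6400 x 4096 shard per rank
pos_emb = seq * hidden                         # learned_absolute, replicated
qkv_cols = (heads + 2 * groups) * kv_ch // tp  # 768 fused-QKV columns per rank
per_layer = (
    2 * hidden                                 # input layernorm (weight + bias)
    + hidden * qkv_cols + qkv_cols             # fused QKV weight + bias
    + (heads * kv_ch // tp) * hidden + hidden  # attention output projection
    + 2 * hidden                               # pre-MLP layernorm
    + hidden * ffn // tp + ffn // tp           # MLP fc1
    + ffn // tp * hidden + hidden              # MLP fc2
)
total = word_emb + pos_emb + layers * per_layer + 2 * hidden  # + final layernorm
print(total)                                   # 204535296
```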
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=32768, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005308 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2081 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001799 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2080 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001482 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2083 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:27:47 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1559.76, 1560.28) + train/valid/test-data-iterators-setup ..........: (31.25, 118.37) +training ... +Setting rerun_state_machine.current_iteration to 0... 
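The shape dumps that follow make the memory numbers easier to read: because create_attention_mask_in_dataloader is True, every sample carries a [1, 1, 32768, 32768] attention mask, which dwarfs the [1, 32768] token tensors at this context length. A rough estimate, assuming the mask is materialized as torch.bool (one byte per element) and the token ids as int64:

```python
# Rough per-sample size of the [1, 1, 32768, 32768] attention mask printed below,
# assuming torch.bool (1 byte/element) for the mask and int64 for the token ids.
seq = 32768
mask_bytes = seq * seq            # 1,073,741,824 bytes
token_bytes = seq * 8             # the [1, 32768] token tensor, for contrast
print(mask_bytes / 2**30, "GiB")  # ~1.0 GiB per sample for the mask alone
print(mask_bytes // token_bytes)  # 4096x larger than the token ids
```

Since context_parallel_size is 1 in this run, the "batch tensor after cp" shapes below are identical to the originals.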
+[before the start of training step] datetime: 2025-06-21 21:27:47 +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 
32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.0703 +Theoretical memory footprints: weight and optimizer=1206.09 MB + [2025-06-21 21:28:01] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 14007.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 0] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8316.0 | max reserved: 8316.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8156.0 | max reserved: 8156.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8060.0 | max reserved: 8060.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8572.0 | max reserved: 8572.0 +[Rank 1] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8316.0 | max reserved: 8316.0 +[Rank 6] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 7932.0 | max reserved: 7932.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8572.0 | max reserved: 8572.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 3503.60107421875 | max allocated: 7725.79931640625 | reserved: 8572.0 | max reserved: 8572.0 +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch 
tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +batch tensor: tokens torch.Size([1, 32768]) +batch tensor: labels torch.Size([1, 32768]) +batch tensor: loss_mask torch.Size([1, 32768]) +batch tensor: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([1, 32768]) +batch tensor after cp: tokens torch.Size([1, 32768]) +batch tensor after cp: labels torch.Size([1, 32768]) +batch tensor after cp: loss_mask torch.Size([1, 32768]) +batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([1, 32768]) +Start exporting trace 1 
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:28:04] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 2462.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:28:06] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 2476.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
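Every iteration so far is reported as skipped and the loss scale halves each time (2147483648 → 1073741824 → ...), which is the standard fp16 dynamic-loss-scaling backoff after an inf/NaN gradient is detected. A rough sketch of that backoff rule, assuming the usual factor-of-two backoff and the initial_loss_scale / min_loss_scale values from the argument dump:

def backoff(loss_scale, backoff_factor=2.0, min_scale=1.0):
    # Skipped iteration: halve the scale, never below min_loss_scale.
    return max(loss_scale / backoff_factor, min_scale)

scale = 4294967296.0   # initial_loss_scale from the argument dump
for _ in range(9):     # nine consecutive skipped iterations
    scale = backoff(scale)
print(scale)           # 8388608.0, the value reported at iteration 10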
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:28:09] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 2525.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:28:11] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 2681.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
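Note the attention_mask shape printed above: because create_attention_mask_in_dataloader is True, every microbatch carries a dense [1, 1, 32768, 32768] boolean mask, which by itself is about 1 GiB. A quick back-of-the-envelope check using the shapes from the log:

seq_len = 32768
mask_bytes = 1 * 1 * seq_len * seq_len   # torch.bool uses one byte per element
print(mask_bytes / 2**30)                # 1.0 GiB for the [1, 1, 32768, 32768] mask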
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 5
+Done exporting trace 5
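Each "Start exporting trace N" / "Done exporting trace N" pair marks a per-iteration profiler trace dump. The log does not show the mechanism; one plausible shape for it, sketched with torch.profiler (the file name, the CPU-only activity list, and the stand-in train_step are assumptions):

import torch
from torch.profiler import profile, ProfilerActivity

def train_step():
    # Stand-in for one real forward/backward step.
    x = torch.randn(512, 512)
    return (x @ x).sum()

iteration = 5
with profile(activities=[ProfilerActivity.CPU]) as prof:
    train_step()
print(f"Start exporting trace {iteration}")
prof.export_chrome_trace(f"trace_iter{iteration}.json")
print(f"Done exporting trace {iteration}")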
+ [2025-06-21 21:28:14] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 2571.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:28:16] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 2426.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:28:19] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 2521.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
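The elapsed times hover around 2.4-2.7 s per iteration for a global batch of one 32768-token sample, i.e. roughly 13k tokens/s through the 8-way tensor-parallel model. A quick check using iteration 8's timing:

seq_len = 32768
global_batch = 1
elapsed_ms = 2521.2                               # iteration 8 above
tokens_per_s = seq_len * global_batch / (elapsed_ms / 1000)
print(round(tokens_per_s))                        # ~1.3e4 tokens/s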
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:28:21] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 2500.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 32768])
+batch tensor: labels torch.Size([1, 32768])
+batch tensor: loss_mask torch.Size([1, 32768])
+batch tensor: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([1, 32768])
+batch tensor after cp: tokens torch.Size([1, 32768])
+batch tensor after cp: labels torch.Size([1, 32768])
+batch tensor after cp: loss_mask torch.Size([1, 32768])
+batch tensor after cp: attention_mask torch.Size([1, 1, 32768, 32768])
+batch tensor after cp: position_ids torch.Size([1, 32768])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:28:24] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 2589.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:28:24
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0449674129486084 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.04502153396606445 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0454099178314209 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.04539346694946289 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.04554295539855957 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.046196699142456055 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.046950578689575195 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.04723954200744629 to prepare state dict for ckpt
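The final checkpoint is saved in the torch_dist distributed-checkpoint format with fully parallel save, and each of the 8 tensor-parallel ranks logs how long it spent preparing its state dict before the save strategy is applied. A minimal sketch of the timing pattern behind those DEBUG lines (the plain state_dict() call is a placeholder, not the Megatron sharded state dict API):

import logging
import time

import torch

logger = logging.getLogger("megatron.training.checkpointing")

def timed_state_dict(module, rank):
    # Time how long this rank spends building its piece of the checkpoint
    # state; real Megatron code builds a sharded state dict for torch_dist,
    # a plain state_dict() is only a stand-in here.
    start = time.time()
    state = module.state_dict()
    logger.debug("rank: %s, takes %s to prepare state dict for ckpt", rank, time.time() - start)
    return state

timed_state_dict(torch.nn.Linear(8, 8), rank=0)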
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +Running ctx_length=40960, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 40960 +TP_SIZE: 8 +CP_SIZE: 1 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 8, data-parallel size: 1, context-parallel size: 1, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... 
False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 1 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ 
assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 40960 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... 
None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 
1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 40960 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 
2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... 
torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 
40960 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... 
False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.045 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. 
Compilation time: 2.727 seconds +time to initialize megatron (seconds): 7.721 +[after megatron is initialized] datetime: 2025-06-21 21:29:36 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 238089728 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 238089728 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 238089728 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (238089728 elements, 238089728 padded size): + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.position_embeddings.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.final_layernorm.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + 
module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.word_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.81, 3.12) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:29:38 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
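The OptimizerConfig above (initial_loss_scale=4294967296, loss_scale_window=1000, hysteresis=2) drives the fp16 dynamic loss scaling visible in the per-iteration lines further below, where each early iteration overflows, is counted as skipped, and the loss scale is cut in half (4294967296.0, then 2147483648.0, and so on). A minimal sketch of that behavior follows; it is simplified (hysteresis is ignored) and is not Megatron's exact implementation.

```python
# Simplified sketch of fp16 dynamic loss scaling (not Megatron's exact code,
# and hysteresis is ignored): halve the scale on an overflowing, skipped step;
# double it again after `loss_scale_window` consecutive clean steps.
def update_loss_scale(scale, found_inf, good_steps,
                      loss_scale_window=1000, min_loss_scale=1.0):
    """Return (new_scale, new_good_steps)."""
    if found_inf:
        return max(scale / 2.0, min_loss_scale), 0
    good_steps += 1
    if good_steps >= loss_scale_window:
        return scale * 2.0, 0
    return scale, good_steps


scale, good = 4294967296.0, 0
for _ in range(8):  # the first iterations of this run all overflow
    scale, good = update_loss_scale(scale, found_inf=True, good_steps=good)
    print(scale)    # 2147483648.0, 1073741824.0, ..., 16777216.0
```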
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=40960, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004303 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001694 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001344 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1667 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:29:38 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1835.02, 1836.24) + train/valid/test-data-iterators-setup ..........: (15.99, 116.35) +training ... +Setting rerun_state_machine.current_iteration to 0... 
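The GPTDatasetConfig above has create_attention_mask=True with sequence_length=40960, which is why every sample dumped below carries an attention_mask of shape [1, 1, 40960, 40960]. A quick back-of-the-envelope size check, assuming the mask is materialized as torch.bool (one byte per element):

```python
# Illustrative size check for the [1, 1, 40960, 40960] attention_mask printed
# for every sample below; the torch.bool (1 byte/element) storage is an
# assumption, the shape itself comes straight from the log.
seq_length = 40960
mask_elements = 1 * 1 * seq_length * seq_length   # 1,677,721,600 entries
mask_gib = mask_elements / 2**30                   # 1 byte each -> GiB
print(f"{mask_elements:,} elements ~= {mask_gib:.2f} GiB per sample")
# -> 1,677,721,600 elements ~= 1.56 GiB per sample
```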
+[before the start of training step] datetime: 2025-06-21 21:29:38 +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 
40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 21:29:48] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 9534.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.0703
+Theoretical memory footprints: weight and optimizer=1206.09 MB
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10850.0 | max reserved: 10850.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10850.0 | max reserved: 10850.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10850.0 | max reserved: 10850.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10690.0 | max reserved: 10690.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10530.0 | max reserved: 10530.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10850.0 | max reserved: 10850.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10370.0 | max reserved: 10370.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 4463.81982421875 | max allocated: 9827.87744140625 | reserved: 10530.0 | max reserved: 10530.0
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1,
40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start 
exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:29:59] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 10892.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor: tokens torch.Size([1, 40960])
+batch tensor: labels torch.Size([1, 40960])
+batch tensor: loss_mask torch.Size([1, 40960])
+batch tensor: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp: position_ids torch.Size([1, 40960])
+batch tensor after cp: tokens torch.Size([1, 40960])
+batch tensor after cp: labels torch.Size([1, 40960])
+batch tensor after cp: loss_mask torch.Size([1, 40960])
+batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960])
+batch tensor after cp:
position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:30:04] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 5140.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 
40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:30:08] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 3703.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after 
cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:30:11] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 
3785.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: 
loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:30:15] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 3741.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels 
torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:30:19] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 3791.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens 
torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:30:23] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 3744.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan 
iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 
40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:30:26] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 3785.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch 
tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:30:30] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 3824.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:30:30 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.03902316093444824 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.03905057907104492 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0390777587890625 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.03911566734313965 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.03912162780761719 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0391390323638916 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.03913760185241699 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.039672136306762695 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
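One detail from the training iterations above is worth unpacking before the checkpoint write: each of iterations 7 through 10 reports "number of skipped iterations: 1", and the fp16 loss scale halves every time (67108864.0 -> 33554432.0 -> 16777216.0 -> 8388608.0). That is the usual halve-on-overflow policy of dynamic loss scaling. Below is a minimal sketch of that policy, with a hypothetical found_overflow flag and growth settings; it is not Megatron's actual grad scaler.

class DynamicLossScale:
    """Halve the scale when a step overflows (the step is skipped), grow it back
    after a run of clean steps. Hypothetical sketch, not Megatron's scaler class."""
    def __init__(self, initial_scale, backoff_factor=0.5, growth_factor=2.0,
                 growth_interval=1000, min_scale=1.0):
        self.scale = initial_scale
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self.growth_interval = growth_interval
        self.min_scale = min_scale
        self._good_steps = 0

    def update(self, found_overflow):
        """Return True if this iteration should be counted as skipped."""
        if found_overflow:
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return False

scaler = DynamicLossScale(initial_scale=67108864.0)   # scale logged at iteration 7
for _ in range(3):                                    # iterations 8, 9 and 10 each skip
    scaler.update(found_overflow=True)
print(scaler.scale)                                   # 8388608.0, as in the iteration 10 line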
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.2387640476226807 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.238776445388794 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.238809585571289 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.2388553619384766 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.2390336990356445 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.2390785217285156 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.2390921115875244 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.007208108901977539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.007253885269165039 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.006964921951293945 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.007189035415649414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.00676727294921875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.9739745 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.973976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.9739747 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.007178544998168945 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.007204294204711914 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.006862163543701172 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.9740076 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.97402 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.9740243 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.974023 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.34600830078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.608268737792969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.751319885253906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.797645568847656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.226799011230469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.822845458984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.679794311523438e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.007479429244995117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541434.9790034 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010514259338378906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.043416738510131836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.0179093 rank: 2, write(async) time: 0.0438838005065918 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04413962364196777 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.018648 rank: 3, write(async) time: 0.04462575912475586 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04678821563720703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04691624641418457 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.0212939 rank: 4, write(async) time: 0.04728293418884277 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.021413 rank: 6, write(async) time: 0.04743552207946777 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0475771427154541 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.0220747 rank: 5, write(async) time: 0.04809904098510742 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.048311710357666016 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.0228333 rank: 1, write(async) time: 0.04880952835083008 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04862570762634277 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.023106 rank: 7, write(async) time: 0.04912734031677246 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04694533348083496 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541435.0264053 rank: 0, write(async) time: 0.04740190505981445 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.288818359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.024997234344482422 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.026682138442993164 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.024814128875732422 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.025957822799682617 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.025536775588989258 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.026275634765625 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.027718067169189453 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started 
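The ~25 ms "schedule async ckpt" entries and the later "Async process join finished after 1.33s from forking" lines show the other half of the async checkpoint: each rank hands the write to a forked background process almost immediately and only joins that process at finalization, so the main process is not blocked by the file I/O. A bare-bones sketch of that schedule-then-join pattern (a sleep stands in for the actual shard write; this is a hypothetical wrapper, not Megatron's caller class, and assumes a platform with fork support):

import multiprocessing as mp
import time

def _background_write(payload, path):
    time.sleep(1.3)                      # stand-in for the ~1.3 s shard write seen in the log
    with open(path, "wb") as f:
        f.write(payload)

def schedule_async_ckpt(payload, path):
    """Fork a writer process and return right away."""
    proc = mp.get_context("fork").Process(target=_background_write, args=(payload, path))
    forked_at = time.time()
    proc.start()                         # cheap, like the ~25 ms "schedule async ckpt"
    return proc, forked_at

if __name__ == "__main__":
    proc, forked_at = schedule_async_ckpt(b"\x00" * 1024, "/tmp/rank0_shard.bin")
    # ... the main process keeps going here ...
    proc.join()                          # "joining self.process"
    print(f"join finished after {time.time() - forked_at:.2f}s from forking")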
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214495232, before: 1611837440, after: 1826332672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216444928, before: 1611202560, after: 1827647488 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214343680, before: 1629151232, after: 1843494912 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214421504, before: 1611202560, after: 1825624064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214421504, before: 1615863808, after: 1830285312 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214515712, before: 1617113088, after: 1831628800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216641536, before: 1611837440, after: 1828478976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216535040, before: 1629151232, after: 1845686272 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214667264, before: 1629958144, after: 1844625408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 214663168, before: 1613639680, after: 1828302848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216543232, before: 1617113088, after: 1833656320 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216223744, before: 1615863808, after: 1832087552 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.7896724, rank: 7, write(sync,parallel): 0.5612914562225342 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results 
successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.8013248, rank: 4, write(sync,parallel): 0.5706212520599365 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.80762, rank: 6, write(sync,parallel): 0.5734515190124512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 215891968, before: 1613639680, after: 1829531648 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 216682496, before: 1629958144, after: 1846640640 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.8180516, rank: 5, write(sync,parallel): 0.5828564167022705 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.8195891, rank: 1, write(sync,parallel): 0.5910701751708984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.8566203, rank: 2, write(sync,parallel): 0.6292545795440674 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541435.8575754, rank: 3, write(sync,parallel): 0.6291158199310303 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.3589859008789062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.029036998748779297 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join 
finished after 1.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 718508032, before: 1880240128, after: 2598748160 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 719814656, before: 1880252416, after: 2600067072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541437.8771925, rank: 0, write(sync,parallel): 1.348818302154541 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.42s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9157948, 2, gather: 1.3870878219604492 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.915793, 3, gather: 1.3870398998260498 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9158893, 5, gather: 1.38714599609375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9158912, 1, gather: 1.3862686157226562 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.916142, 4, gather: 1.3875482082366943 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9162, 6, gather: 1.3875155448913574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9163232, 7, gather: 1.387528896331787 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.917782, 0, gather: 0.004753828048706055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541437.9305139, metadata_write: 0.012610197067260742 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0199s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4044s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4044s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4042s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4045s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4044s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4040s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 1.4045s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/8, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0025734901428222656 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002593994140625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002510547637939453 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.002420186996459961 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0026025772094726562 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.002460002899169922 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.002478361129760742 to 
finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.002507925033569336 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) 
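Throughout the run, every batch is printed twice, once as "batch tensor" and once as "batch tensor after cp", and the shapes are always identical because this configuration uses CP_SIZE=1: the context-parallel split of the sequence dimension is a no-op. A simplified version of that step is sketched below (contiguous chunks only; the actual load-balanced slicing and the 2D attention mask are omitted).

import torch

def slice_batch_for_cp(batch, cp_size, cp_rank, seq_dim=1):
    """Split each [batch, seq] tensor into cp_size chunks along the sequence
    dimension and keep this rank's chunk; with cp_size == 1 nothing changes."""
    if cp_size == 1:
        return batch
    return {k: t.chunk(cp_size, dim=seq_dim)[cp_rank] for k, t in batch.items()}

batch = {"tokens": torch.zeros(1, 40960, dtype=torch.long),
         "labels": torch.zeros(1, 40960, dtype=torch.long),
         "loss_mask": torch.ones(1, 40960),
         "position_ids": torch.arange(40960).unsqueeze(0)}
for k, v in slice_batch_for_cp(batch, cp_size=1, cp_rank=0).items():
    print("batch tensor after cp:", k, tuple(v.shape))   # unchanged, matching the log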
+batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 10 +Done exporting trace 10 +(min, max) time across ranks (ms): + evaluate .......................................: (5637.44, 5638.60) +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on validation set | lm loss value: 1.288522E+01 | lm loss PPL: 3.944393E+05 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids 
torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +batch tensor: tokens torch.Size([1, 40960]) +batch tensor: labels torch.Size([1, 40960]) +batch tensor: loss_mask torch.Size([1, 40960]) +batch tensor: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor: position_ids torch.Size([1, 40960]) +batch tensor after cp: tokens torch.Size([1, 40960]) +batch tensor after cp: labels torch.Size([1, 40960]) +batch tensor after cp: loss_mask torch.Size([1, 40960]) +batch tensor after cp: attention_mask torch.Size([1, 1, 40960, 40960]) +batch tensor after cp: position_ids torch.Size([1, 40960]) +Start exporting trace 11 +Done exporting trace 11 +(min, max) time across ranks (ms): + evaluate .......................................: (3778.51, 3779.14) +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.288522E+01 | lm loss PPL: 3.944393E+05 | +---------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Running ctx_length=49152, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint 
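The reported "lm loss PPL" is simply the exponential of the lm loss (natural-log cross entropy), which is easy to verify from the numbers printed above:

import math
lm_loss = 1.288522e+01             # "lm loss value" at iteration 10
print(f"{math.exp(lm_loss):.6E}")  # ~3.9444E+05, matching the logged PPL of 3.944393E+05 up to rounding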
+--------------------------------
+CTX_LENGTH: 49152
+TP_SIZE: 8
+CP_SIZE: 1
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
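Worth keeping in mind for this next run: the run above materialized a [1, 1, 40960, 40960] attention mask for every batch (see the "batch tensor: attention_mask" lines), and that quadratic footprint grows further at CTX_LENGTH 49152. A quick back-of-the-envelope, assuming a 1-byte bool mask and a kernel that materializes one full fp16 [seq, seq] score matrix per head (fused or flash-style attention would avoid the latter):

for seq_len in (40960, 49152):
    mask_bytes = seq_len * seq_len          # [1, 1, s, s] bool mask, 1 byte per element
    scores_fp16 = 2 * seq_len * seq_len     # one full [s, s] fp16 score matrix
    print(f"ctx {seq_len}: mask ~{mask_bytes / 2**30:.2f} GiB, "
          f"fp16 scores per head ~{scores_fp16 / 2**30:.2f} GiB")
# mask: ~1.56 GiB at 40960 vs ~2.25 GiB at 49152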