diff --git "a/attnserver.run_attnserver.slurm.sh.343219.out.log" "b/attnserver.run_attnserver.slurm.sh.343219.out.log" new file mode 100644--- /dev/null +++ "b/attnserver.run_attnserver.slurm.sh.343219.out.log" @@ -0,0 +1,13677 @@ +Running ctx_length=1024, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... 
AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ 
None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 1024 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ 
False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 
32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 1024 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. 
None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... 
None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 
2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 1024 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. 
None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... 
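The "padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above follows from rounding the tokenizer vocab up to a multiple of make_vocab_size_divisible_by × tensor-model-parallel size = 128 × 4 = 512, so every TP shard gets an equally sized, aligned slice. A minimal sketch of that arithmetic (the helper name pad_vocab_size is ours, not Megatron's):

    def pad_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
        """Round the vocab up so each tensor-parallel shard holds an equal, aligned slice."""
        multiple = divisible_by * tp_size            # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    padded = pad_vocab_size(50257, divisible_by=128, tp_size=4)
    print(padded, padded - 50257)                    # 50688 431 -> matches the log line
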
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.052 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 3.199 seconds +time to initialize megatron (seconds): 10.309 +[after megatron is initialized] datetime: 2025-06-21 21:24:15 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 
(144247808 elements, 144247808 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.77, 2.90) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:24:15 +> building train, validation, and test datasets ... 
+ > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=1024, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.018044 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66592 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.003666 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66562 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.003618 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66686 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:24:15 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (284.76, 304.98) + train/valid/test-data-iterators-setup ..........: (37.42, 174.92) +training ... +Setting rerun_state_machine.current_iteration to 0... 
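The "batch tensor" / "batch tensor after cp" lines printed below show each context-parallel rank keeping only seq_length / CP_SIZE = 1024 / 4 = 256 of the token positions, while the attention mask keeps the full 1024-key dimension, hence the [1, 1, 256, 1024] shape. A minimal sketch of that kind of split, assuming a plain contiguous chunking (Megatron's actual scheme interleaves two chunks per rank to balance causal-attention work; this is a simplification, not its implementation):

    import torch

    def split_batch_for_cp_rank(batch, cp_size, cp_rank):
        """Keep this CP rank's slice of the sequence dimension (contiguous chunks, simplified)."""
        out = {}
        for key, t in batch.items():
            if key == "attention_mask":              # [b, 1, seq, seq]: slice query rows only
                out[key] = t.chunk(cp_size, dim=2)[cp_rank]
            else:                                    # tokens/labels/loss_mask/position_ids: [b, seq]
                out[key] = t.chunk(cp_size, dim=1)[cp_rank]
        return out

    batch = {
        "tokens": torch.zeros(1, 1024, dtype=torch.long),
        "labels": torch.zeros(1, 1024, dtype=torch.long),
        "loss_mask": torch.ones(1, 1024),
        "position_ids": torch.arange(1024).unsqueeze(0),
        "attention_mask": torch.ones(1, 1, 1024, 1024, dtype=torch.bool),
    }
    local = split_batch_for_cp_rank(batch, cp_size=4, cp_rank=0)
    print(local["tokens"].shape, local["attention_mask"].shape)
    # torch.Size([1, 256]) torch.Size([1, 1, 256, 1024])

Each of the 256 local query rows still attends over all 1024 keys, which is why only the query dimension of the mask shrinks in the log.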
+[before the start of training step] datetime: 2025-06-21 21:24:15
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 21:24:28] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 12298.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 1802.13232421875 | max allocated: 1802.13330078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 1802.13232421875 | max allocated: 1802.13330078125 | reserved: 1978.0 | max reserved: 1978.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 1801.94482421875 | max allocated: 1801.94580078125 | reserved: 1978.0 | max reserved: 1978.0
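A quick sanity check on the "Theoretical memory footprints" figure above: with fp16 training, Adam, and no distributed optimizer, Megatron budgets roughly 18 bytes per parameter on the most loaded model shard. The breakdown assumed here (2-byte fp16 weight + 4-byte fp32 master weight + 4-byte fp32 gradient + two 4-byte Adam moments) is our reading, not quoted from the log:

    params_most_loaded_shard = 0.14e9      # "most loaded shard" from the log, in parameters
    bytes_per_param = 2 + 4 + 4 + 4 + 4    # assumed breakdown: fp16 weight, fp32 master, fp32 grad, Adam m and v
    mb = params_most_loaded_shard * bytes_per_param / 2**20
    print(f"{mb:.2f} MB")                  # ~2403 MB, in line with "weight and optimizer=2403.18 MB"
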
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:24:28] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 260.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
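
The "batch tensor" / "batch tensor after cp" pairs above show the context-parallel split: with CP_SIZE=4 each rank keeps 1024/4 = 256 of the 1024 sequence positions, while the attention mask keeps all 1024 key positions for this rank's 256 queries, hence [1, 1, 256, 1024]. The following is a minimal sketch of that slicing under the assumption of a simple contiguous per-rank split; it is illustrative only and not Megatron's exact chunk assignment (names such as slice_for_cp are hypothetical).

import torch

def slice_for_cp(batch, cp_rank, cp_size):
    # Keep a contiguous 1/cp_size slice of the sequence dimension (assumed scheme).
    seq_len = batch["tokens"].size(1)            # 1024 in this log
    chunk = seq_len // cp_size                   # 1024 // 4 = 256
    s = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
    out = {k: v[:, s] for k, v in batch.items() if k != "attention_mask"}
    # Mask keeps the full key axis but only this rank's query rows: [1, 1, 256, 1024]
    out["attention_mask"] = batch["attention_mask"][:, :, s, :]
    return out

batch = {
    "tokens": torch.zeros(1, 1024, dtype=torch.long),
    "labels": torch.zeros(1, 1024, dtype=torch.long),
    "loss_mask": torch.ones(1, 1024),
    "position_ids": torch.arange(1024).unsqueeze(0),
    "attention_mask": torch.ones(1, 1, 1024, 1024, dtype=torch.bool),
}
print({k: tuple(v.shape) for k, v in slice_for_cp(batch, cp_rank=0, cp_size=4).items()})
# tokens/labels/loss_mask/position_ids -> (1, 256), attention_mask -> (1, 1, 256, 1024)
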
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:24:28] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 43.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
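
The loss scale reported in the iteration lines halves on every skipped iteration (2147483648.0 -> 1073741824.0 -> 536870912.0 -> ...), which is consistent with standard fp16 dynamic loss scaling backing off when gradients overflow. Below is a minimal illustrative sketch of that bookkeeping, assuming an initial scale of 2**32 and a backoff factor of 2; it is not Megatron's actual grad-scaler code.

scale = 2.0 ** 32  # assumed initial scale; the first logged value (2**31) follows one halving
for iteration in range(1, 10):
    found_overflow = True  # every early iteration in this run overflows, so the step is skipped
    if found_overflow:
        scale /= 2.0
        print(f"iteration {iteration}: skipped, loss scale now {scale:.1f}")
    # else: apply the optimizer step and grow the scale after enough clean steps
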
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:24:28] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 42.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:24:28] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 40.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:24:28] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 40.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:24:28] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 40.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:24:28] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 39.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 21:24:28] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 40.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 1024])
+batch tensor: labels torch.Size([1, 1024])
+batch tensor: loss_mask torch.Size([1, 1024])
+batch tensor: attention_mask torch.Size([1, 1, 1024, 1024])
+batch tensor: position_ids torch.Size([1, 1024])
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+batch
tensor after cp: loss_mask torch.Size([1, 256]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024]) +batch tensor after cp: position_ids torch.Size([1, 256]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor after cp:batch tensor after cp: tokenstokens torch.Size([1, 256])torch.Size([1, 256]) + +batch tensor after cp: batch tensor after cp:labels labelstorch.Size([1, 256]) +batch tensor:batch tensor: attention_mask tokenstorch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024])torch.Size([1, 1024]) + +torch.Size([1, 256])batch tensor after cp: + batch tensor after cp:loss_mask loss_masktorch.Size([1, 256]) +torch.Size([1, 256])batch tensor after cp: +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 1024]) + attention_maskbatch tensor after cp: torch.Size([1, 1, 256, 1024])attention_mask + batch tensor after cp:torch.Size([1, 1, 256, 1024]) +position_ids batch tensor after cp:torch.Size([1, 256]) +position_ids torch.Size([1, 256]) +batch tensor: batch tensor after cp:attention_mask tokens torch.Size([1, 1, 1024, 1024])torch.Size([1, 256]) + +batch tensor after cp: tokens torch.Size([1, 256]) +batch tensor after cp: batch tensor:labels position_idstorch.Size([1, 256])batch tensor: +batch tensor after cp: labels torch.Size([1, 256]) +batch tensor after cp: batch tensor:torch.Size([1, 1024]) tokenstokens +batch tensor after cp: loss_mask torch.Size([1, 256]) +batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024]) +torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 256]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 1024]) +batch tensor: loss_maskloss_mask torch.Size([1, 256]) +batch tensor: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])torch.Size([1, 1024]) + batch tensor:torch.Size([1, 1024]) + +batch tensor: position_ids torch.Size([1, 1024]) +labelsbatch tensor: batch tensor after cp:attention_masktorch.Size([1, 1024]) position_ids +batch tensor after cp: tokens torch.Size([1, 256]) +batch tensor after cp: labels torch.Size([1, 256]) +batch tensor after cp: loss_mask torch.Size([1, 256]) +torch.Size([1, 1, 1024, 1024])torch.Size([1, 256])batch tensor: + +batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024]) +batch tensor:loss_mask position_idstorch.Size([1, 1024]) +torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 256]) +batch tensor: attention_mask torch.Size([1, 1, 1024, 1024]) +batch tensor: position_ids torch.Size([1, 1024]) +Start exporting trace 9 +batch tensor after cp: tokens torch.Size([1, 256]) +Done exporting trace 9 +batch tensor after cp: labels torch.Size([1, 256]) +batch tensor after cp: loss_mask batch tensor after cp:torch.Size([1, 256]) +[after training is done] datetime: 2025-06-21 21:24:28 + tokensbatch tensor after cp: attention_masktorch.Size([1, 256]) +torch.Size([1, 1, 256, 1024])batch tensor after cp: +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format + batch tensor after cp:labelsbatch tensor after cp: torch.Size([1, 256])position_idstokens + batch tensor after cp: torch.Size([1, 256])torch.Size([1, 256])loss_mask + + torch.Size([1, 256])batch tensor after cp: + labelsbatch tensor after cp: torch.Size([1, 
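The `batch tensor` / `batch tensor after cp` pairs above show what context parallelism does to each microbatch: with CP_SIZE=4 every rank keeps only 256 of the 1024 tokens (and the matching query rows of the attention mask), while the key/value dimension of the mask stays at the full 1024. The parallel layout also checks out against the header: world size 16 = TP 4 × CP 4 × PP 1 × DP 1. Below is a minimal, illustrative sketch of such a split. It assumes the common load-balanced layout in which the sequence is cut into 2×CP chunks and rank r keeps chunks r and 2·CP−1−r; that layout reproduces the shapes in the log but is not necessarily byte-for-byte what this Megatron build does.

```python
import torch

def split_for_context_parallel(batch, cp_size, cp_rank):
    """Load-balanced CP split (sketch, not Megatron's code): the sequence dim is
    cut into 2*cp_size chunks and each rank keeps chunk cp_rank plus its mirror
    chunk (2*cp_size - 1 - cp_rank), so every rank holds 1/cp_size of the tokens."""
    out = {}
    for key, t in batch.items():
        seq_dim = 2 if key == "attention_mask" else 1   # mask is [b, 1, s, s]
        chunks = t.chunk(2 * cp_size, dim=seq_dim)
        out[key] = torch.cat(
            [chunks[cp_rank], chunks[2 * cp_size - 1 - cp_rank]], dim=seq_dim
        )
    return out

batch = {
    "tokens": torch.zeros(1, 1024, dtype=torch.long),
    "labels": torch.zeros(1, 1024, dtype=torch.long),
    "loss_mask": torch.ones(1, 1024),
    "attention_mask": torch.ones(1, 1, 1024, 1024, dtype=torch.bool),
    "position_ids": torch.arange(1024).unsqueeze(0),
}
shard = split_for_context_parallel(batch, cp_size=4, cp_rank=0)
print(shard["tokens"].shape)          # torch.Size([1, 256])
print(shard["attention_mask"].shape)  # torch.Size([1, 1, 256, 1024])
```

Under this assumed layout, rank 0 would end up with tokens 0–127 and 896–1023, which balances causal-attention work across CP ranks.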
+batch tensor after cp: tokens torch.Size([1, 256])
+batch tensor after cp: labels torch.Size([1, 256])
+batch tensor after cp: loss_mask torch.Size([1, 256])
+batch tensor after cp: attention_mask torch.Size([1, 1, 256, 1024])
+batch tensor after cp: position_ids torch.Size([1, 256])
+[... remaining per-rank "batch tensor after cp" shape prints, interleaved copies omitted ...]
+ [2025-06-21 21:24:28] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 38.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.05521392822265625 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.05530095100402832 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.056468963623046875 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.05534553527832031 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.05645322799682617 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.05537843704223633 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.056561946868896484 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.05661821365356445 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.056636810302734375 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.055429935455322266 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.05668997764587402 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.05576634407043457 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 4, takes 0.05709052085876465 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.0558323860168457 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0572667121887207 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.05671429634094238 to prepare state dict for ckpt
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
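Both iteration lines above report `number of skipped iterations: 1` and a loss scale that halves from 16777216.0 to 8388608.0, which is the standard dynamic-loss-scaling response to fp16 gradient overflow: the step is dropped and the scale backs off by a factor of two. A compact sketch of that update rule follows; the class and its constants are illustrative assumptions, not Megatron's exact implementation.

```python
class DynamicLossScaler:
    """Minimal sketch of fp16 dynamic loss scaling: halve the scale and skip the
    optimizer step on overflow, grow the scale again after a window of clean steps.
    (Constants are assumptions; only the halving behaviour is taken from the log.)"""
    def __init__(self, init_scale=2.0**24, growth_interval=1000, backoff=0.5, growth=2.0):
        self.scale = init_scale
        self.growth_interval = growth_interval
        self.backoff = backoff
        self.growth = growth
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Return True if the optimizer step should be skipped."""
        if found_overflow:
            self.scale *= self.backoff      # e.g. 16777216.0 -> 8388608.0, as in the log
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return False

scaler = DynamicLossScaler(init_scale=16777216.0)
print(scaler.update(found_overflow=True), scaler.scale)   # True 8388608.0
```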
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), 
(np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(216006656), 0), (np.int64(217055232), 1), (np.int64(218103808), 2), (np.int64(214321152), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6845502853393555 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6815500259399414 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.681800127029419 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.684858798980713 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.684753179550171 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6849796772003174 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6821422576904297 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6850335597991943 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.682234764099121 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6851317882537842 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6823842525482178 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6854009628295898 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6855370998382568 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6829547882080078 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.012124300003051758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.6837058067321777 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save 
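The `distribute_shards_to_ranks distribution` lines show the fully parallel save balancing the checkpoint across four writer ranks in nearly equal byte buckets (one group lands at roughly 206–218 MB per rank, a second at roughly 214–218 MB), so each rank writes a different slice instead of rank 0 writing everything. A toy version of that balancing step is sketched below; the function name, the shard names, and the largest-first greedy heuristic are illustrative assumptions, not Megatron's actual algorithm.

```python
from typing import Dict, List, Tuple

def distribute_shards_greedy(shard_sizes: Dict[str, int], num_ranks: int) -> List[Tuple[int, List[str]]]:
    """Assign shards to writer ranks largest-first, always picking the rank with
    the fewest bytes so far, so the per-rank byte totals come out nearly equal."""
    assigned_bytes = [0] * num_ranks
    assignment: List[List[str]] = [[] for _ in range(num_ranks)]
    for name, size in sorted(shard_sizes.items(), key=lambda kv: kv[1], reverse=True):
        rank = min(range(num_ranks), key=lambda r: assigned_bytes[r])
        assigned_bytes[rank] += size
        assignment[rank].append(name)
    return list(zip(assigned_bytes, assignment))

# Hypothetical shard sizes; the log only reports the resulting per-rank totals
# (e.g. ~207.6 MB, 212.9 MB, 213.9 MB, 205.6 MB for ranks 0..3).
sizes = {"embedding": 100_000_000, "layer_0": 70_000_000, "layer_1": 70_000_000,
         "layer_2": 70_000_000, "layer_3": 70_000_000, "final_norm": 1_000_000}
for total, names in distribute_shards_greedy(sizes, num_ranks=4):
    print(total, names)
```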
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.009581804275512695 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.009317874908447266 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.008994817733764648 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.009400129318237305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.00751042366027832 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.008825540542602539 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7564955 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.756496 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7564979 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7565024 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7565105 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7565174 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.007221221923828125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.008043289184570312 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.103515625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7565603 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.604194641113281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.007224321365356445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.004426240921020508 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.008754491806030273 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.007820844650268555 
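Each rank then plans its part of the save (`plan time: ...`) and checks whether global metadata from an earlier save of the same checkpoint structure can be reused; `no loaded plans passed` indicates nothing was cached yet, so planning ran from scratch. The sketch below only illustrates that caching idea; `plan_save`, its return values, and the cache key are hypothetical and are not the torch.distributed.checkpoint API.

```python
_plan_cache = {}

def plan_save(state_dict_keys, can_reuse):
    """Reuse a cached save plan when the checkpoint structure is unchanged,
    otherwise build and cache a fresh one. Purely illustrative."""
    key = tuple(sorted(state_dict_keys))
    if can_reuse and key in _plan_cache:
        return _plan_cache[key], True         # plan reused, planning skipped
    plan = {"items": list(key)}               # stand-in for a real save plan
    _plan_cache[key] = plan
    return plan, False                         # freshly planned

plan, reused = plan_save({"embedding.weight", "layer0.weight"}, can_reuse=True)
print(reused)   # False on the first save, matching "no loaded plans passed"
```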
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.651878356933594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.756567 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.510185241699219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.0044291019439697266 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.4849853515625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558193 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558181 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.751319885253906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558227 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.008276939392089844 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.557868957519531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.131431579589844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7558613 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.389617919921875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.510185241699219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.031990051269531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.700920104980469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.963180541992188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.002315521240234375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.486343383789062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7559419 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.222724914550781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.009188413619995117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541070.7602363 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.91278076171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.056465864181518555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05659365653991699 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.813474 rank: 12, write(async) time: 0.0569608211517334 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8135445 rank: 15, write(async) time: 0.057047367095947266 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05687737464904785 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8138986 rank: 14, write(async) time: 0.05733227729797363 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05863761901855469 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.815557 rank: 10, write(async) time: 0.05906200408935547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059934377670288086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8169122 rank: 8, write(async) time: 0.06039261817932129 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.060242652893066406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8171532 rank: 11, write(async) time: 0.060658931732177734 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06194734573364258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8188777 rank: 9, write(async) time: 0.06237506866455078 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06541657447814941 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.822399 rank: 13, write(async) time: 0.06583762168884277 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07361721992492676 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07370710372924805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.829931 rank: 5, write(async) time: 0.07410264015197754 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8300238 rank: 7, write(async) time: 0.07419872283935547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0753169059753418 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8316696 rank: 4, write(async) time: 0.07580924034118652 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0756223201751709 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07574892044067383 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8319645 rank: 1, write(async) time: 0.07612109184265137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8321009 rank: 3, write(async) time: 0.07628273963928223 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07927894592285156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.835631 rank: 6, write(async) time: 0.07980680465698242 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07980728149414062 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8362887 rank: 2, write(async) time: 0.08034610748291016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.08889651298522949 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541070.8497772 rank: 0, write(async) time: 0.08953976631164551 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.9550323486328125e-05 to finish D2H 
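The `D2H and push` timings (~55–90 ms) cover the first half of the asynchronous save: each rank copies its shard from GPU to host memory so that the actual file write can happen off the training-critical path. A minimal sketch of that staging step, with a hypothetical helper name:

```python
import time
import torch

def stage_to_host(state_dict):
    """Copy all tensors to CPU before an async checkpoint write, so the background
    writer never has to touch CUDA memory (cf. the 'D2H and push' phase above).
    Illustrative sketch, not Megatron's implementation."""
    t0 = time.time()
    host_copy = {name: t.detach().to("cpu") for name, t in state_dict.items()}
    return host_copy, time.time() - t0

shard = {"layer0.weight": torch.randn(1024, 1024)}   # stand-in for a model shard
host_shard, d2h_time = stage_to_host(shard)
print(f"D2H and push, time: {d2h_time}")
```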
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 2.193450927734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.03024601936340332 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.02955484390258789 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.030445575714111328 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03084564208984375 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.03493952751159668 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.03403830528259277 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.002716064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.8133392333984375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.3126602172851562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.03278231620788574 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.036424875259399414 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.035567283630371094 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03290152549743652 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03373122215270996 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03673076629638672 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03305673599243164 to 
schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.03252530097961426 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.03409719467163086 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.051535606384277344 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1672806400, after: 1672941568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 217088, before: 1660452864, after: 1660669952 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 323584, before: 1667125248, after: 1667448832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
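After staging, each rank schedules the write and only later joins it: the host-side shard is handed to a forked helper (reported with `thread_count: 2`, hence the paired `0 started` / `1 started` worker lines), control returns to the trainer in roughly 30–50 ms (`to schedule async ckpt`), and the rank blocks on `joining self.process` only when the checkpoint must be finalized. A bare-bones sketch of that fork/continue/join pattern; `schedule_async_save` is a made-up helper and Unix `fork` is assumed.

```python
import multiprocessing as mp
import torch

def _write_worker(host_state, path):
    # Runs in the forked process: serialize the already-on-CPU shard to disk.
    torch.save(host_state, path)

def schedule_async_save(host_state, path):
    """Start the checkpoint write in a child process and return immediately so
    the training loop can keep going; the caller joins the process later."""
    proc = mp.get_context("fork").Process(target=_write_worker, args=(host_state, path))
    proc.start()
    return proc

if __name__ == "__main__":
    proc = schedule_async_save({"layer0.weight": torch.zeros(4, 4)}, "/tmp/ckpt_shard.pt")
    # ... the next training iterations would run here ...
    proc.join()   # 'joining self.process': block until the background write finishes
```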
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 13197312, before: 1990250496, after: 2003447808 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106156032, before: 1673994240, after: 1780150272 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 106213376, before: 1669292032, after: 1775505408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114270208, before: 1731952640, after: 1846222848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 112836608, before: 1696591872, after: 1809428480 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110252032, before: 1674088448, after: 1784340480 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108765184, before: 1683869696, after: 1792634880 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109649920, before: 1668726784, after: 1778376704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110264320, before: 1669398528, after: 1779662848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114196480, before: 1692979200, after: 1807175680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105848832, before: 1668726784, after: 1774575616 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108941312, before: 1670021120, after: 1778962432 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110522368, before: 1692979200, after: 1803501568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114380800, before: 1731936256, after: 1846317056 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114012160, before: 1693544448, after: 1807556608 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108888064, before: 1668345856, after: 1777233920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109547520, before: 1681412096, after: 1790959616 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3244588, rank: 15, write(sync,parallel): 0.3851451873779297 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114233344, before: 1668345856, after: 1782579200 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114335744, before: 1681412096, after: 1795747840 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109686784, before: 1693544448, after: 1803231232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110190592, before: 1696591872, after: 1806782464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3513072, rank: 13, write(sync,parallel): 0.38546276092529297 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3528674, rank: 14, 
write(sync,parallel): 0.4134836196899414 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114380800, before: 1670004736, after: 1784385536 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3627598, rank: 12, write(sync,parallel): 0.41977715492248535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 118321152, before: 1683869696, after: 1802190848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3638546, rank: 8, write(sync,parallel): 0.4134538173675537 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114327552, before: 1684918272, after: 1799245824 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109006848, before: 1684914176, after: 1793921024 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3792844, rank: 6, write(sync,parallel): 0.40203022956848145 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3819811, rank: 11, write(sync,parallel): 0.425884485244751 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.3855262, rank: 9, write(sync,parallel): 0.43560171127319336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.385746, rank: 10, write(sync,parallel): 0.43881845474243164 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.49s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.47s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.50s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.49s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.4116266, rank: 4, write(sync,parallel): 0.4267120361328125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.414406, rank: 5, write(sync,parallel): 0.432492733001709 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.48s from forking 
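Putting the shard sizes and the `write(sync,parallel)` timings together gives a rough feel for the storage bandwidth during this save: a ~214 MB shard written in ~0.41 s is on the order of 0.5 GB/s per rank. The pairing below is illustrative only, since the log does not state which byte total belongs to which rank.

```python
shard_bytes = 213_909_504           # one per-rank total from distribute_shards_to_ranks above
write_seconds = 0.4134836196899414  # a representative write(sync,parallel) time from the log
print(f"~{shard_bytes / write_seconds / 1e9:.2f} GB/s per rank")   # ~0.52 GB/s
```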
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.50s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.52s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.4322033, rank: 7, write(sync,parallel): 0.4424712657928467 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212434944, before: 1667125248, after: 1879560192 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.54s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212156416, before: 1660534784, after: 1872691200 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212455424, before: 1672806400, after: 1885261824 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212180992, before: 1990250496, after: 2202431488 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.508873, rank: 2, write(sync,parallel): 0.5178689956665039 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.5309758, rank: 3, write(sync,parallel): 0.5431654453277588 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.535939, rank: 1, write(sync,parallel): 0.5489940643310547 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.59s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541071.566328, rank: 0, write(sync,parallel): 0.528287410736084 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.62s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.62s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.64s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6234972, 7, gather: 0.14194917678833008 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6236563, 5, gather: 0.1697556972503662 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6237273, 6, gather: 
0.2031712532043457 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.623753, 4, gather: 0.17210078239440918 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6237845, 3, gather: 0.055008649826049805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6238854, 1, gather: 0.04968905448913574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.624624, 9, gather: 0.19391918182373047 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6246269, 10, gather: 0.19844388961791992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6238995, 2, gather: 0.07734560966491699 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6248832, 12, gather: 0.22063827514648438 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6261172, 0, gather: 0.005057334899902344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6249218, 15, gather: 0.26125288009643555 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0074s +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.624978, 8, gather: 0.2197740077972412 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6250465, 14, gather: 0.23090457916259766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.625024, 11, gather: 0.202833890914917 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541071.6250308, 13, gather: 0.2296905517578125 +Running ctx_length=2048, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... 
torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... 
True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... 
False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. 
None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... 
None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... 
learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 
0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ 
False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.052 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.994 seconds +time to initialize megatron (seconds): 9.458 +[after megatron is initialized] datetime: 2025-06-21 21:25:49 +building GPT model ... 
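Note on the "> padded vocab" line above: with make_vocab_size_divisible_by=128 and tensor_model_parallel_size=4, the 50257-entry GPT-2 BPE vocab is rounded up to the next multiple of 128 * 4 = 512, giving 50688, i.e. 431 dummy tokens. A minimal sketch of that rounding rule (the helper name is illustrative, not Megatron's API):

def pad_vocab_size(orig_vocab_size, make_divisible_by, tp_size):
    # Round the vocab up to a multiple of make_divisible_by * tp_size so the
    # word-embedding table splits evenly across tensor-parallel ranks.
    multiple = make_divisible_by * tp_size              # 128 * 4 = 512
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = pad_vocab_size(50257, 128, 4)
print(padded, padded - 50257)                           # 50688 431, as logged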
+>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> decoder +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (148442112 elements, 148442112 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.position_embeddings.weight + 
module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.74, 3.19) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:25:49 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=2048, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.007434 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33296 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002827 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33281 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002653 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33343 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:25:49 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (369.25, 400.18) + train/valid/test-data-iterators-setup ..........: (21.82, 170.51) +training ... +Setting rerun_state_machine.current_iteration to 0... 
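The "batch tensor" / "batch tensor after cp" prints that follow show the effect of context parallelism on each micro-batch: every CP rank keeps seq_length / context_parallel_size = 2048 / 4 = 512 query positions, while the attention mask keeps the full 2048-token key dimension, so its shape becomes [1, 1, 512, 2048]. A rough sketch of such a split, assuming a plain contiguous slice per rank (Megatron's real CP sharding additionally interleaves chunks to balance causal-attention load):

import torch

def slice_batch_for_cp(batch, cp_rank, cp_size):
    # Keep only this CP rank's contiguous slice along the sequence dimension.
    seq_len = batch["tokens"].size(1)                   # 2048 in this run
    shard = seq_len // cp_size                          # 2048 // 4 = 512
    start, end = cp_rank * shard, (cp_rank + 1) * shard
    out = {k: v[:, start:end] for k, v in batch.items() if k != "attention_mask"}
    # Queries are sharded but keys/values still cover the whole sequence,
    # hence the [batch, 1, shard, seq_len] mask.
    out["attention_mask"] = batch["attention_mask"][:, :, start:end, :]
    return out

batch = {
    "tokens": torch.zeros(1, 2048, dtype=torch.long),
    "labels": torch.zeros(1, 2048, dtype=torch.long),
    "loss_mask": torch.ones(1, 2048),
    "position_ids": torch.arange(2048).unsqueeze(0),
    "attention_mask": torch.ones(1, 1, 2048, 2048, dtype=torch.bool),
}
for name, t in slice_batch_for_cp(batch, cp_rank=0, cp_size=4).items():
    print("batch tensor after cp:", name, t.shape)      # e.g. torch.Size([1, 512])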
+[before the start of training step] datetime: 2025-06-21 21:25:49
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
[the same shape prints are emitted, interleaved, by each of the 16 ranks]
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+ [2025-06-21 21:26:02] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 12617.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2022.0 | max reserved: 2022.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2022.0 | max reserved: 2022.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2022.0 | max reserved: 2022.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2022.0 | max reserved: 2022.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 1853.72900390625 | max allocated: 1853.72998046875 | reserved: 2024.0 | max reserved: 2024.0
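The parameter counts reported above follow from the argument dump (hidden_size=4096, ffn_hidden_size=16384, num_layers=2, num_query_groups=16 with kv_channels=64, padded vocab 50688, tied output weights). The figures line up if the estimate counts the shared word-embedding table once and ignores the much smaller position-embedding table; a back-of-the-envelope check (plain arithmetic, not Megatron's own reporting code):

hidden, ffn, layers = 4096, 16384, 2
query_groups, kv_channels = 16, 64
padded_vocab, tp = 50688, 4

qkv_out = hidden + 2 * query_groups * kv_channels        # fused QKV output dim under GQA: 6144
attn = hidden * qkv_out + qkv_out + hidden * hidden + hidden   # QKV + output proj, with biases
mlp = 2 * hidden * ffn + ffn + hidden                     # fc1 + fc2, with biases
norms = 4 * hidden                                        # two LayerNorms (weight + bias) per layer
block = layers * (attn + mlp + norms)
embedding = padded_vocab * hidden                         # word embeddings; output layer is tied
total = block + embedding
print(f"{block/1e9:.2f} {embedding/1e9:.2f} {total/1e9:.2f} {total/tp/1e9:.4f}")
# -> 0.35 0.21 0.56 0.1400, matching the lines above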
2048])torch.Size([1, 1, 2048, 2048]) + +attention_mask batch tensor after cp: torch.Size([1, 512]) +torch.Size([1, 1, 2048, 2048])loss_mask +torch.Size([1, 512]) +batch tensor:batch tensor after cp: attention_maskposition_ids torch.Size([1, 1, 512, 2048]) +torch.Size([1, 2048])batch tensor after cp: + position_ids torch.Size([1, 512]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels batch tensor after cp:torch.Size([1, 512])batch tensor after cp: +batch tensor: tokens torch.Size([1, 2048]) + batch tensor after cp:tokens tokensloss_mask torch.Size([1, 512]) + torch.Size([1, 512])torch.Size([1, 512]) + +batch tensor after cp: batch tensor after cp:labelsbatch tensor after cp: attention_masktorch.Size([1, 512]) labels +torch.Size([1, 1, 512, 2048]) batch tensor after cp: +torch.Size([1, 512]) +batch tensor after cp:loss_mask batch tensor after cp: position_ids torch.Size([1, 512]) loss_masktorch.Size([1, 512]) +torch.Size([1, 512]) + +batch tensor after cp: batch tensor after cp:attention_mask attention_masktorch.Size([1, 1, 512, 2048]) +batch tensor: labels batch tensor after cp:torch.Size([1, 2048]) +torch.Size([1, 1, 512, 2048])batch tensor after cp: + position_idsbatch tensor after cp: torch.Size([1, 512])position_ids + torch.Size([1, 512]) +tokensbatch tensor: loss_masktorch.Size([1, 512]) torch.Size([1, 2048]) + +batch tensor after cp: batch tensor:labels attention_masktorch.Size([1, 512]) +torch.Size([1, 1, 2048, 2048])batch tensor after cp: +batch tensor after cp:batch tensor after cp: tokenstokens torch.Size([1, 512])torch.Size([1, 512]) + +batch tensor after cp: batch tensor after cp:labels labelstorch.Size([1, 512]) + loss_maskbatch tensor: torch.Size([1, 512])position_ids + batch tensor after cp:torch.Size([1, 2048]) +attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +torch.Size([1, 512])batch tensor after cp: + loss_mask batch tensor after cp:torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +loss_maskbatch tensor after cp: torch.Size([1, 512])attention_mask + batch tensor after cp: torch.Size([1, 1, 512, 2048])attention_mask + batch tensor after cp: torch.Size([1, 1, 512, 2048])position_ids + batch tensor after cp:torch.Size([1, 512]) +position_ids torch.Size([1, 512]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels 
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:26:02] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 84.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:26:02] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 50.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:26:02] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 42.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
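Every iteration report above shows `number of skipped iterations: 1` and a loss scale that halves from one step to the next (2147483648.0 -> 1073741824.0 -> 536870912.0 -> ...), which is the expected backoff behaviour of fp16 dynamic loss scaling when gradients overflow. A minimal sketch of that rule, assuming a backoff factor of 2 and a growth interval; the exact constants used by the training script may differ:

class DynamicLossScaler:
    def __init__(self, init_scale=2.0 ** 31, backoff=2.0, growth_interval=1000):
        self.scale = init_scale
        self.backoff = backoff
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool) -> None:
        if found_overflow:
            # The optimizer step is skipped and the scale is halved,
            # matching the "skipped iterations: 1" lines above.
            self.scale /= self.backoff
            self._good_steps = 0
        else:
            self._good_steps += 1
            if self._good_steps % self.growth_interval == 0:
                self.scale *= self.backoff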
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:26:02] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 48.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:26:02] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 41.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:26:02] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 44.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
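The `Start exporting trace N` / `Done exporting trace N` pairs bracket a per-iteration profiler dump. A minimal sketch of how such a trace can be captured and exported with the standard PyTorch profiler; the wrapper function, output path, and surrounding prints are assumptions rather than the script's actual code:

from torch.profiler import profile, ProfilerActivity

def run_profiled_iteration(step_fn, iteration: int) -> None:
    # Profile one training step on CPU and GPU, then dump a Chrome trace.
    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
        step_fn()
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"trace_{iteration}.json")
    print(f"Done exporting trace {iteration}")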
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:26:02] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 43.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor:batch tensor: position_ids torch.Size([1, 2048])tokens + torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor:batch tensor after cp: labels torch.Size([1, 512])tokens + batch tensor after cp: loss_mask torch.Size([1, 512])torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) + +batch tensor after cp: attention_maskbatch tensor: torch.Size([1, 1, 512, 2048])labels +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) + batch tensor after cp:torch.Size([1, 2048]) +position_idsbatch tensor: torch.Size([1, 512])loss_mask + torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor: position_idsbatch tensor after cp: torch.Size([1, 2048])tokens + torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch 
tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 2048, 2048]) +batch tensor: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: tokens torch.Size([1, 512]) +batch tensor after cp: labels torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +batch tensor after cp: loss_mask torch.Size([1, 512]) +batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048]) +batch tensor after cp: position_ids torch.Size([1, 512]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:26:02] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 
43.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 2048])
+batch tensor: loss_mask torch.Size([1, 2048])
+batch tensor: attention_mask torch.Size([1, 1, 2048, 2048])
+batch tensor: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+batch tensor after cp: tokens torch.Size([1, 512])
+batch tensor after cp: labels torch.Size([1, 512])
+batch tensor after cp: loss_mask torch.Size([1, 512])
+batch tensor after cp: attention_mask torch.Size([1, 1, 512, 2048])
+batch tensor after cp: position_ids torch.Size([1, 512])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:26:03] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 44.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:26:03
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.027827978134155273 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.027869701385498047 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.027898311614990234 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.027916908264160156 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.027917861938476562 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.02837061882019043 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.02837657928466797 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.028411388397216797 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.031043529510498047 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.031070947647094727 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.031116247177124023 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.031302452087402344 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 0, takes 0.03153634071350098 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.032885074615478516 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.03598499298095703 to prepare
state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.03684043884277344 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +Running ctx_length=4096, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ 
False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ 
None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... 
most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 
1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 
0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... 
None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 
1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ 
True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... 
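The "padded vocab" line above rounds the 50257-token GPT-2 vocabulary up so the embedding table splits evenly across the 4 tensor-parallel ranks. A minimal sketch of that arithmetic, assuming the usual Megatron-style rule of padding to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size (the helper name below is illustrative, not the exact Megatron function):

    def padded_vocab_size(orig_vocab_size: int,
                          make_vocab_size_divisible_by: int = 128,
                          tensor_model_parallel_size: int = 4) -> int:
        # Round up to the next multiple that divides evenly across TP ranks.
        multiple = make_vocab_size_divisible_by * tensor_model_parallel_size  # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    # 99 * 512 = 50688, i.e. 431 dummy tokens on top of 50257, matching the log line above.
    assert padded_vocab_size(50257) == 50688

With this padding each tensor-parallel rank owns 50688 / 4 = 12672 rows of the word-embedding matrix.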
+INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.050 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 6.824 seconds +time to initialize megatron (seconds): 13.608 +[after megatron is initialized] datetime: 2025-06-21 21:27:20 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, 
fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (156830720 elements, 156830720 padded size): + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.position_embeddings.weight + module.embedding.word_embeddings.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.60, 2.76) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:27:21 +> building train, validation, and test datasets ... 
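The OptimizerConfig above (initial_loss_scale=4294967296, loss_scale_window=1000, hysteresis=2, min_loss_scale=1.0) explains the loss-scale column in the iteration lines earlier in this log: every step reported there overflows in fp16, is counted as a skipped iteration, and after the hysteresis budget is used up the scale is halved, which is consistent with the printed values of 33554432.0, 16777216.0 and 8388608.0 at iterations 8 to 10 (2^32 halved roughly once per step). A minimal sketch of backoff-style dynamic loss scaling in that spirit, written as an illustration rather than Megatron's exact grad-scaler code:

    class BackoffLossScaler:
        def __init__(self, initial_scale=2.0**32, window=1000, hysteresis=2, min_scale=1.0):
            self.scale = initial_scale
            self.window = window            # clean steps required before growing the scale
            self.hysteresis = hysteresis    # overflows tolerated before the first backoff
            self.min_scale = min_scale
            self._good_steps = 0
            self._hysteresis_left = hysteresis

        def update(self, found_overflow: bool) -> bool:
            # Returns True when the optimizer step should be skipped.
            if found_overflow:
                self._good_steps = 0
                self._hysteresis_left -= 1
                if self._hysteresis_left <= 0:
                    self.scale = max(self.scale / 2.0, self.min_scale)
                return True
            self._good_steps += 1
            if self._good_steps % self.window == 0:
                self.scale *= 2.0
                self._hysteresis_left = self.hysteresis
            return False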
+ > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006788 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002069 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002105 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:27:21 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (364.31, 394.50) + train/valid/test-data-iterators-setup ..........: (19.12, 151.18) +training ... +Setting rerun_state_machine.current_iteration to 0... 
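The per-rank batch prints in this log show what context parallelism does to each tensor: in the 2048-token run above, every sequence-shaped tensor shrinks from the full context length to context_length / CP_SIZE once the batch is sliced, so [1, 2048] becomes [1, 512] with CP_SIZE=4, while the attention mask is sliced only on its query dimension and keeps the full key length ([1, 1, 512, 2048]). A minimal sketch of that slicing, assuming the common load-balanced scheme that splits the sequence into 2*cp_size chunks and gives rank r the chunks r and 2*cp_size-1-r (an illustration of the idea, not necessarily the exact Megatron code path):

    import torch

    def slice_batch_for_cp(batch: dict, cp_size: int, cp_rank: int) -> dict:
        # Each rank keeps two of 2*cp_size chunks so causal-attention work stays balanced.
        if cp_size == 1:
            return batch
        out = {}
        for name, tensor in batch.items():
            # The attention mask is sliced on its query dimension; keys stay full length.
            dim = 2 if name == 'attention_mask' else 1
            chunks = tensor.chunk(2 * cp_size, dim=dim)
            out[name] = torch.cat(
                [chunks[cp_rank], chunks[2 * cp_size - 1 - cp_rank]], dim=dim)
        return out

    # With seq_len=2048 and cp_size=4, each rank keeps 2 * 256 = 512 positions,
    # which matches the "batch tensor after cp" shapes printed in the log.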
+[before the start of training step] datetime: 2025-06-21 21:27:21
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
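+The "batch tensor after cp" lines show each context-parallel rank keeping 1024 of the 4096 sequence positions, while the attention mask keeps all 4096 key positions. A minimal sketch of that per-rank slicing, assuming a simple contiguous split; Megatron-Core's real slicing reorders chunks for causal load balancing, so treat this purely as an illustration:
+import torch
+
+def slice_batch_for_cp_rank(batch, cp_rank, cp_size):
+    # Tensors are [b, s] except attention_mask, which is [b, 1, s, s];
+    # each rank keeps s / cp_size query positions but all key positions.
+    out = {}
+    for name, t in batch.items():
+        seq_dim = 2 if name == "attention_mask" else 1
+        chunk = t.shape[seq_dim] // cp_size
+        out[name] = t.narrow(seq_dim, cp_rank * chunk, chunk)
+    return out
+
+batch = {
+    "tokens": torch.zeros(1, 4096, dtype=torch.long),
+    "labels": torch.zeros(1, 4096, dtype=torch.long),
+    "loss_mask": torch.ones(1, 4096),
+    "attention_mask": torch.ones(1, 1, 4096, 4096, dtype=torch.bool),
+    "position_ids": torch.arange(4096).unsqueeze(0),
+}
+sliced = slice_batch_for_cp_rank(batch, cp_rank=0, cp_size=4)
+# tokens/labels/loss_mask/position_ids -> [1, 1024]; attention_mask -> [1, 1, 1024, 4096]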
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+ [2025-06-21 21:27:33] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 11921.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2312.0 | max reserved: 2312.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2312.0 | max reserved: 2312.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2312.0 | max reserved: 2312.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2312.0 | max reserved: 2312.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2272.0 | max reserved: 2272.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2332.0 | max reserved: 2332.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2332.0 | max reserved: 2332.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2332.0 | max reserved: 2332.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2330.0 | max reserved: 2330.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2330.0 | max reserved: 2330.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2330.0 | max reserved: 2330.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2330.0 | max reserved: 2330.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2328.0 | max reserved: 2328.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2328.0 | max reserved: 2328.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2328.0 | max reserved: 2328.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 1964.79736328125 | max allocated: 1964.79833984375 | reserved: 2328.0 | max reserved: 2328.0
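+The "Theoretical memory footprints: weight and optimizer=2403.18 MB" figure is consistent with a mixed-precision Adam accounting of roughly 18 bytes per parameter on the most loaded shard; the byte breakdown below is an assumption for illustration, not something this log states:
+# Rough check of the reported weight-and-optimizer footprint.
+# 18 bytes/param = 2 (fp16 weight) + 4 (fp32 main weight) + 4 (fp32 grad)
+# + 4 (exp_avg) + 4 (exp_avg_sq); assumed, not read from the log.
+params_most_loaded_shard = 0.1400e9
+bytes_per_param = 2 + 4 + 4 + 4 + 4
+print(params_most_loaded_shard * bytes_per_param / 2**20)  # ~2403 MB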
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:27:33] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 110.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
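+Every early iteration is skipped and the loss scale halves (4294967296.0, then 2147483648.0, and so on), which is the standard fp16 dynamic loss-scaling backoff on gradient overflow. A minimal sketch using the config values shown earlier (initial_loss_scale=4294967296, loss_scale_window=1000); hysteresis is omitted and this is not the literal Megatron scaler:
+class DynamicLossScaler:
+    # Hedged sketch: halve the scale on overflow (skipping that step),
+    # double it again after a window of overflow-free steps.
+    def __init__(self, scale=4294967296.0, min_scale=1.0, window=1000,
+                 backoff=0.5, growth=2.0):
+        self.scale, self.min_scale = scale, min_scale
+        self.window, self.backoff, self.growth = window, backoff, growth
+        self.good_steps = 0
+
+    def update(self, found_overflow):
+        if found_overflow:
+            self.scale = max(self.scale * self.backoff, self.min_scale)
+            self.good_steps = 0        # the iteration is skipped
+        else:
+            self.good_steps += 1
+            if self.good_steps % self.window == 0:
+                self.scale *= self.growth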
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:27:33] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 74.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
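+After the first, much slower step, iterations settle around 70-110 ms. With a global batch of one 4096-token sample, that corresponds to very roughly 37-55k tokens per second; the arithmetic below only restates numbers printed above:
+# Rough throughput estimate from the logged per-iteration times.
+tokens_per_iter = 1 * 4096          # global batch size x sequence length
+for ms in (110.0, 74.6):
+    print(f"{ms} ms/iter -> {tokens_per_iter / (ms / 1000):.0f} tokens/s")
+# ~37236 and ~54906 tokens/s respectively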
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:27:33] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 72.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:27:33] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 73.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
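+The "Start exporting trace N" / "Done exporting trace N" pairs suggest a per-iteration profiler trace dump. A hedged sketch of doing something similar with torch.profiler; the file name, the step_fn hook, and profiling every step are assumptions, not details taken from this run:
+import torch
+from torch.profiler import profile, ProfilerActivity
+
+def run_step_with_trace(step_fn, iteration):
+    # Profile a single training step and export a Chrome trace for it.
+    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
+        step_fn()
+    print(f"Start exporting trace {iteration}")
+    prof.export_chrome_trace(f"trace_{iteration}.json")
+    print(f"Done exporting trace {iteration}")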
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:27:33] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 76.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:27:33] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 77.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+batch
tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:27:33] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 72.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) 
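The iteration lines above all report "number of skipped iterations: 1" together with a loss scale that halves every step (134217728 -> 67108864 -> 33554432 -> ...). That is the signature of dynamic fp16 loss scaling backing off from a large initial scale: every overflowing step is skipped and the scale is multiplied by a backoff factor. The class below is a simplified stand-in for that behaviour, not Megatron's actual grad scaler; the 2**32 initial value and 0.5 backoff are assumptions consistent with the printed values (2**32 halved once per iteration reaches 2**27 = 134217728 by iteration 6).

# Simplified dynamic loss scaler illustrating the halving seen in the log; the real
# Megatron/Apex scalers differ in detail.
class SimpleDynamicLossScaler:
    def __init__(self, initial_scale=2.0 ** 32, backoff_factor=0.5,
                 growth_factor=2.0, growth_interval=1000):
        self.scale = initial_scale
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self.growth_interval = growth_interval
        self._stable_steps = 0

    def update(self, found_inf: bool) -> None:
        if found_inf:
            # optimizer step is skipped ("number of skipped iterations: 1")
            self.scale *= self.backoff_factor
            self._stable_steps = 0
        else:
            self._stable_steps += 1
            if self._stable_steps % self.growth_interval == 0:
                self.scale *= self.growth_factor  # grow back after a run of clean steps
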
+batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 
4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096])
+batch tensor after cp: position_ids torch.Size([1, 1024])
+batch tensor: tokens torch.Size([1, 4096])
+batch tensor: labels torch.Size([1, 4096])
+batch tensor: loss_mask torch.Size([1, 4096])
+batch tensor: attention_mask torch.Size([1, 1, 4096, 4096])
+batch tensor: position_ids torch.Size([1, 4096])
+batch tensor after cp: tokens torch.Size([1, 1024])
+batch tensor after cp: labels torch.Size([1, 1024])
+batch tensor after cp: loss_mask torch.Size([1, 1024])
+batch tensor
after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:27:33] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 70.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask 
torch.Size([1, 1, 1024, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after 
cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:27:34] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 70.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:27:34 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.025096893310546875 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.025157690048217773 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.02516651153564453 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.026049137115478516 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.026070117950439453 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.027038097381591797 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, 
takes 0.027174949645996094 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.027187585830688477 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.02719879150390625 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.02722620964050293 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0273284912109375 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.02781534194946289 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.028336286544799805 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.031065940856933594 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.03171586990356445 to prepare state dict for ckpt +WARNING:megatron.core.dist_checkpointing.serialization:Overwriting old incomplete / corrupted checkpoint... +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.0364384651184082 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
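The distribute_shards_to_ranks lines above and below report how the fully parallel save strategy spreads checkpoint shards over the four ranks of a save group, with roughly balanced byte totals (about 206-214 MB per rank here). A common way to reach such a balance is a greedy pass that always hands the next-largest shard to the currently least-loaded rank; the helper below is an illustrative sketch of that idea, not the actual Megatron implementation.

# Greedy size-balancing sketch: assign each shard (by byte size) to the least-loaded
# rank. The 4-rank group mirrors the log; the algorithm itself is an assumption.
def distribute_shards_to_ranks_sketch(shard_sizes, num_ranks=4):
    load = [0] * num_ranks
    assignment = {}
    for shard_id, size in sorted(enumerate(shard_sizes), key=lambda item: -item[1]):
        rank = min(range(num_ranks), key=lambda r: load[r])
        assignment[shard_id] = rank
        load[rank] += size
    return assignment, load  # e.g. load roughly [207618048, 212860928, 213909504, 205588480]
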
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(236978176), 0), (np.int64(234244096), 1), (np.int64(234881024), 2), (np.int64(234881024), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3517837524414062 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.357088327407837 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.351980209350586 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3589253425598145 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3698091506958008 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.35205078125 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3584694862365723 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3520586490631104 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3521974086761475 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.370042324066162 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 
1.358001947402954 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3527662754058838 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.35837984085083 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3600234985351562 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.3533470630645752 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.011422157287597656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.0047223567962646484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.005377769470214844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.005272388458251953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.003068685531616211 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.002986907958984375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.0021600723266601562 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.0034356117248535156 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.0041964054107666016 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.005349159240722656 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5937712 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5930526 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.593053 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.593053 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5930552 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5937786 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5937836 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5930629 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.0041577816009521484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5930622 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.005728006362915039 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5930965 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.221366882324219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1021575927734375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.221366882324219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.00535273551940918 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5938158 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.771087646484375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.081031799316406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.003417491912841797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.942054748535156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.009506225585938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.866455078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5938363 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.890296936035156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.005010128021240234 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.605552673339844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5938582 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.151199340820312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5938818 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.005807161331176758 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.0052051544189453125 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5969582 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.367134094238281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.9604644775390625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750541255.5939178 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.340576171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.843971252441406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.580352783203125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.054749488830566406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05488705635070801 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6490881 rank: 12, write(async) time: 0.05516982078552246 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6491368 rank: 9, write(async) time: 0.055299997329711914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0558621883392334 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05630326271057129 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6500452 rank: 13, write(async) time: 0.0562746524810791 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6497602 rank: 6, write(async) time: 0.05669665336608887 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05695199966430664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.651148 rank: 15, write(async) time: 0.05736565589904785 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057488203048706055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6516945 rank: 14, write(async) time: 0.05791640281677246 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05826592445373535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05772852897644043 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6517172 rank: 2, write(async) time: 0.05866503715515137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.651939 rank: 11, write(async) time: 0.058122873306274414 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05849480628967285 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05852103233337402 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6519785 rank: 1, write(async) time: 0.05892372131347656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6519792 rank: 3, write(async) time: 0.058927059173583984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0595850944519043 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6531541 rank: 4, write(async) time: 0.06009697914123535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06144547462463379 
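The "D2H and push" and "write(async)" timings show the async checkpoint path: each rank first copies its shard from GPU to host memory (roughly 55-70 ms here) and then hands the host copy to a separate writer, so the slow file I/O happens off the training loop's critical path. A minimal, generic version of that pattern is sketched below; it uses plain torch.save in a spawned process and is not the FileSystemWriterAsync implementation.

import torch
import torch.multiprocessing as mp

def _background_write(path, cpu_state):
    # Slow disk write runs in a separate process, off the critical path.
    torch.save(cpu_state, path)

def async_save_sketch(state_dict, path):
    # Stage 1 ("D2H and push"): copy GPU tensors to host memory on the main process.
    cpu_state = {k: (v.detach().cpu() if torch.is_tensor(v) else v)
                 for k, v in state_dict.items()}
    # Stage 2 ("write(async)"): launch the writer and return immediately.
    proc = mp.get_context("spawn").Process(target=_background_write, args=(path, cpu_state))
    proc.start()
    return proc  # joined later, cf. the "joining self.process" lines below
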
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6557689 rank: 10, write(async) time: 0.061887502670288086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05911898612976074 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6565843 rank: 0, write(async) time: 0.05962491035461426 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0631711483001709 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6567214 rank: 5, write(async) time: 0.06365847587585449 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06679725646972656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.660347 rank: 7, write(async) time: 0.0672464370727539 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07048869132995605 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541255.6647403 rank: 8, write(async) time: 0.07088065147399902 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.5735626220703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.032118797302246094 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.031800270080566406 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.032570600509643555 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.03511404991149902 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.032425880432128906 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.2649765014648438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.031658172607421875 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.71661376953125e-05 to 
finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03647446632385254 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.03661942481994629 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.04117250442504883 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03317546844482422 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.03269147872924805 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.03325915336608887 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.04412078857421875 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.04610037803649902 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03394961357116699 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04130864143371582 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1748082688, after: 1748217856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 131072, before: 1722642432, after: 1722773504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 28672, before: 1742716928, after: 1742745600 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 34095104, before: 1970692096, after: 2004787200 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108613632, before: 1713364992, after: 1821978624 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109895680, before: 1747034112, after: 1856929792 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105824256, before: 1760325632, after: 1866149888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109178880, before: 1750048768, after: 1859227648 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109240320, before: 1711443968, after: 1820684288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 108838912, before: 1764233216, after: 1873072128 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110014464, before: 1742950400, after: 1852964864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109850624, before: 1813671936, after: 1923522560 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114376704, before: 1764233216, after: 1878609920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114360320, before: 1739223040, after: 1853583360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105959424, before: 1742950400, after: 1848909824 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 114089984, before: 1747034112, after: 1861124096 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113111040, before: 1750138880, after: 1863249920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 110243840, before: 1739223040, after: 1849466880 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 109891584, before: 1760325632, after: 1870217216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114122752, before: 1713364992, after: 1827487744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113733632, before: 1750048768, after: 1863782400 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 105697280, before: 1813671936, after: 1919369216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3127885, rank: 10, write(sync,parallel): 0.5274412631988525 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3211586, rank: 11, write(sync,parallel): 0.5402712821960449 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3326652, rank: 14, write(sync,parallel): 0.5545639991760254 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 114511872, before: 1711443968, after: 1825955840 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 122474496, before: 1713491968, after: 1835966464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 113905664, before: 1794404352, after: 1908310016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3523712, rank: 9, write(sync,parallel): 0.5721278190612793 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.61s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3565552, rank: 7, write(sync,parallel): 0.5351717472076416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3580928, rank: 6, write(sync,parallel): 0.5455710887908936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.62s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3605893, rank: 15, write(sync,parallel): 0.58233642578125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 130420736, before: 1794404352, after: 1924825088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 130609152, before: 1750138880, after: 1880748032 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.3651464, rank: 13, write(sync,parallel): 0.5913746356964111 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 121987072, before: 1713491968, after: 1835479040 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.383369, rank: 5, write(sync,parallel): 0.561516284942627 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.64s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.63s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.66s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: 
Async process join finished after 0.63s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.67s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.416041, rank: 8, write(sync,parallel): 0.6030309200286865 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.68s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.4212682, rank: 12, write(sync,parallel): 0.6371610164642334 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.4210594, rank: 4, write(sync,parallel): 0.588270902633667 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.66s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.67s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.70s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.72s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212398080, before: 1748082688, after: 1960480768 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212111360, before: 1742716928, after: 1954828288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212037632, before: 1722642432, after: 1934680064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.6288617, rank: 1, write(sync,parallel): 0.8137555122375488 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.6564016, rank: 2, write(sync,parallel): 0.8377289772033691 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.89s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.670291, rank: 3, write(sync,parallel): 0.8517193794250488 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.91s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212344832, before: 1970692096, after: 2183036928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.93s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750541256.757489, rank: 0, write(sync,parallel): 0.881899356842041 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.98s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8131704, 3, gather: 0.10122466087341309 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8143587, 11, gather: 0.4532046318054199 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8143427, 9, gather: 0.41216063499450684 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8132577, 2, gather: 0.1171255111694336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8144522, 13, gather: 0.39301109313964844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8132741, 1, gather: 0.1450214385986328 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8145065, 14, gather: 0.42807936668395996 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8132684, 5, gather: 0.38071441650390625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8132741, 4, gather: 0.3503999710083008 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8132129, 6, gather: 0.4142017364501953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8133101, 7, gather: 0.41071343421936035 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8145359, 15, gather: 0.39889049530029297 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8145823, 12, gather: 0.3429241180419922 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8146083, 10, gather: 0.4589669704437256 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8150096, 8, gather: 0.347301721572876 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8157568, 0, gather: 0.005087852478027344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750541256.8289206, metadata_write: 0.013020753860473633 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1349s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0209s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1190s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1626s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3684s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3606s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4706s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4455s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4164s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4107s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3639s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4297s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3985s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4321s 
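The DEBUG trace above walks through the asynchronous distributed-checkpoint save as logged: each rank copies its shards from device to host ("finish D2H"), schedules the write on a background process ("schedule async ckpt"), and the trainer later joins that process ("joining self.process"), collects worker results, gathers, writes metadata, and finalizes. Below is a minimal sketch of that overlap pattern using only multiprocessing and torch.save; the helper names (async_save, _write_shard) and the path argument are illustrative stand-ins, not Megatron's FileSystemWriterAsync API.

import multiprocessing as mp
import torch

def _write_shard(cpu_state_dict, path):
    # Background process: the shard is already host-resident, so this is pure disk I/O.
    torch.save(cpu_state_dict, path)

def async_save(state_dict, path):
    # D2H: move tensors off the GPU so training can continue while the write runs.
    cpu_state = {k: (v.detach().cpu() if torch.is_tensor(v) else v)
                 for k, v in state_dict.items()}
    # Schedule the write on a separate process (the "schedule async ckpt" step above).
    proc = mp.get_context("spawn").Process(target=_write_shard, args=(cpu_state, path))
    proc.start()
    return proc  # the caller joins this later, mirroring "joining self.process"

The per-rank write(sync,parallel), gather, metadata_write, and finalize timings logged here measure exactly the phases that such a scheme overlaps with training.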
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4285s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4761s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0026662349700927734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0026712417602539062 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.0026803016662597656 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.002697467803955078 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.002710580825805664 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.002669811248779297 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0026769638061523438 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.002661466598510742 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0026900768280029297 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0026848316192626953 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.002623319625854492 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0026862621307373047 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002671480178833008 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0026786327362060547 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.002652883529663086 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.002686023712158203 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask 
torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch 
tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 10 +Done exporting trace 10 +WARNING:megatron.core.rerun_state_machine:Setting 
RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (3179.18, 3181.42) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on validation set | lm loss value: 1.249281E+01 | lm loss PPL: 2.664139E+05 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: tokens 
torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels 
torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +batch tensor: tokens torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 4096, 4096]) +batch tensor: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 1024]) +batch tensor after cp: labels torch.Size([1, 1024]) +batch tensor after cp: loss_mask torch.Size([1, 1024]) +batch tensor after cp: attention_mask torch.Size([1, 1, 1024, 4096]) +batch tensor after cp: position_ids torch.Size([1, 1024]) +Start exporting trace 11 +Done exporting trace 11 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (69.63, 72.65) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.249281E+01 | lm loss PPL: 2.664139E+05 | +---------------------------------------------------------------------------------------------------------- +Running ctx_length=8192, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 8192 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +--------------------------------
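The shape dumps in the evaluation above show what context parallelism does to each micro-batch: the full 4096-token sequence is split across the CP_SIZE=4 ranks, so every rank keeps a 1024-token slice of tokens, labels, loss_mask, and position_ids, while its attention-mask query rows still span all 4096 key positions ([1, 1, 1024, 4096]). The reported perplexity is simply exp(lm loss): exp(12.49281) ≈ 2.664E+05. The sketch below illustrates that slicing under the simplifying assumption of plain contiguous chunking (the real implementation may interleave chunks to balance causal attention); cp_rank is an arbitrary illustrative rank, and the first line just reproduces the parallelism bookkeeping from the startup banner.

import torch

world_size, tp, cp, pp = 16, 4, 4, 1
print(world_size // (tp * cp * pp))                  # data-parallel size 1, as logged at startup

cp_size, seq_len = 4, 4096
tokens = torch.arange(seq_len).unsqueeze(0)          # [1, 4096] stand-in for the full batch
attention_mask = torch.ones(1, 1, seq_len, seq_len)  # [1, 1, 4096, 4096]

cp_rank = 2                                          # illustrative rank
chunk = seq_len // cp_size                           # 1024 tokens per CP rank
sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)

tokens_cp = tokens[:, sl]                            # [1, 1024]
mask_cp = attention_mask[:, :, sl, :]                # [1, 1, 1024, 4096]: query rows sliced, key columns kept
print(tokens_cp.shape, mask_cp.shape)

print(torch.exp(torch.tensor(12.49281)))             # ≈ 2.664e+05, matching the lm loss PPL above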
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. 
None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... 
False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 8192 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... 
[] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... 
False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 8192 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... 
False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ 
False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. 
True + seed ............................................ 1234 + seq_length ...................................... 8192 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... 
False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.061 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.954 seconds +time to initialize megatron (seconds): 11.988 +[after megatron is initialized] datetime: 2025-06-21 21:28:20 +building GPT model ... 
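A note on the "padded vocab" line above: the tokenizer vocabulary is padded so the word-embedding table splits evenly across tensor-parallel ranks. A minimal sketch of that arithmetic, assuming the padding rule is "round up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size" (the helper name below is ours, not Megatron-LM's):

    def padded_vocab_size(orig_vocab_size, divisible_by=128, tp_size=4):
        # round up to a multiple of make_vocab_size_divisible_by * TP size
        multiple = divisible_by * tp_size                        # 128 * 4 = 512
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple

    print(padded_vocab_size(50257))                              # 50688

With this run's settings that gives 50257 -> 50688, i.e. the 431 dummy tokens reported above, and 50688 / 4 = 12672 embedding rows per tensor-parallel rank.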
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (173607936 elements, 173607936 padded size):
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (17.85, 18.15) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:28:21 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
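The OptimizerConfig above enables fp16 training with dynamic loss scaling (initial_loss_scale=4294967296, loss_scale_window=1000, min_loss_scale=1.0). That is what the early iteration lines below are showing: each reports "number of skipped iterations: 1" while the loss scale halves (4294967296 -> 2147483648 -> 1073741824 -> ...), and the learning rate still reads 0.000000E+00 because the 2-iteration warmup only advances on successful steps. A minimal sketch of the usual scaling scheme, not the exact Megatron-LM implementation (which also honors the hysteresis setting):

    class DynamicLossScaler:
        # Halve the scale on overflow; double it after `window` clean steps.
        def __init__(self, initial_scale=2.0 ** 32, min_scale=1.0, window=1000):
            self.scale, self.min_scale, self.window = initial_scale, min_scale, window
            self.clean_steps = 0

        def update(self, found_overflow):
            # Returns True when the optimizer step should be skipped.
            if found_overflow:
                self.scale = max(self.scale / 2.0, self.min_scale)
                self.clean_steps = 0
                return True
            self.clean_steps += 1
            if self.clean_steps % self.window == 0:
                self.scale *= 2.0
            return False

The "Theoretical memory footprints: weight and optimizer=2403.18 MB" line further down is consistent with the usual mixed-precision Adam accounting of roughly 18 bytes per parameter (fp16 weight + fp32 main weight + fp32 gradient + two fp32 Adam moments): 0.14e9 params in the most loaded shard * 18 bytes is about 2.52e9 bytes, i.e. roughly 2403 MiB.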
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=8192, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006250 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8324 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002009 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8320 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001881 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8335 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:28:21 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (780.04, 796.59) + train/valid/test-data-iterators-setup ..........: (18.22, 151.47) +training ... +Setting rerun_state_machine.current_iteration to 0... 
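The shape dumps that follow show context parallelism at work: every rank first sees the full seq_length=8192 sample ("batch tensor"), then keeps only 8192 / CP_SIZE = 2048 positions ("batch tensor after cp"), while the attention mask keeps all 8192 key positions for its 2048 query rows. A rough sketch of that slicing under a simple contiguous-chunk assumption (Megatron-LM's get_batch_on_this_cp_rank actually splits the sequence into 2 * cp_size chunks and gives each rank a head and a tail chunk to balance causal-attention work, but the per-rank sizes come out the same):

    import torch

    def slice_for_cp_rank(batch, cp_rank, cp_size):
        # Keep this rank's contiguous share of the sequence dimension.
        out = {}
        for name, t in batch.items():
            seq_dim = 2 if name == "attention_mask" else 1   # mask is [b, 1, s, s]
            chunk = t.size(seq_dim) // cp_size
            out[name] = t.narrow(seq_dim, cp_rank * chunk, chunk)
        return out

    batch = {"tokens": torch.zeros(1, 8192, dtype=torch.long),
             "attention_mask": torch.ones(1, 1, 8192, 8192, dtype=torch.bool)}
    sliced = slice_for_cp_rank(batch, cp_rank=0, cp_size=4)
    print(sliced["tokens"].shape)          # torch.Size([1, 2048])
    print(sliced["attention_mask"].shape)  # torch.Size([1, 1, 2048, 8192])

All 16 ranks (TP_SIZE=4 x CP_SIZE=4, data-parallel size 1) print both sets of shapes, which is why the same lines repeat many times per iteration.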
+[before the start of training step] datetime: 2025-06-21 21:28:21 +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) 
+batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: 
attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 0 +Done exporting trace 0 + [2025-06-21 21:28:35] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 13999.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB +[Rank 10] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2740.0 | max reserved: 2740.0 +[Rank 0] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2632.0 | max reserved: 2632.0 +[Rank 14] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2648.0 | max reserved: 2648.0 +[Rank 1] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2632.0 | max reserved: 2632.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2640.0 | max reserved: 2640.0 +[Rank 8] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2740.0 | max reserved: 2740.0 +[Rank 9] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2652.0 | max reserved: 2652.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2632.0 | max reserved: 2632.0 +[Rank 11] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2632.0 | max reserved: 2632.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 
2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2632.0 | max reserved: 2632.0 +[Rank 12] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2648.0 | max reserved: 2648.0 +[Rank 5] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2640.0 | max reserved: 2640.0 +[Rank 13] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2648.0 | max reserved: 2648.0 +[Rank 15] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2648.0 | max reserved: 2648.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2640.0 | max reserved: 2640.0[Rank 6] (after 1 iterations) memory (MB) | allocated: 2216.93408203125 | max allocated: 2216.93505859375 | reserved: 2640.0 | max reserved: 2640.0 + +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch 
tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: 
position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:28:35] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 226.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch 
tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) 
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048])
+batch tensor after cp: labels torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: loss_mask torch.Size([1, 2048])
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192])
+batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 8192])
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192])
+batch tensor: labels torch.Size([1, 8192])
+batch tensor: loss_mask torch.Size([1, 
8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:28:36] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 188.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) 
+batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor 
after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:28:36] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 188.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids 
torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: 
tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch 
tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:28:36] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 191.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: 
loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 
1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: batch tensor:position_ids torch.Size([1, 2048]) + tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:28:36] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 194.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after 
cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1,
2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:28:36] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 189.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) 
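Every iteration summary in this run reports "number of skipped iterations: 1" with the loss scale halving each time (536870912.0 at iteration 4 down to 8388608.0 at iteration 10), which matches the usual fp16 dynamic loss-scaling backoff when non-finite gradients are detected. A minimal sketch of that behaviour, assuming a generic scaler with made-up names rather than Megatron's actual implementation:

def update_loss_scale(scale, found_inf, good_steps, backoff=0.5, growth=2.0, growth_interval=1000):
    # Hypothetical helper, not Megatron's API: back off on overflow, grow after a
    # run of clean steps.
    if found_inf:
        return scale * backoff, 0          # the step is skipped and the scale halves
    good_steps += 1
    if good_steps % growth_interval == 0:
        return scale * growth, good_steps  # grow again after enough clean steps
    return scale, good_steps

scale, good_steps = 536870912.0, 0         # value reported at iteration 4 above
for _ in range(6):                         # iterations 5-10 each skip and halve
    scale, good_steps = update_loss_scale(scale, found_inf=True, good_steps=good_steps)
    print(scale)                           # 268435456.0, 134217728.0, ..., 8388608.0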
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask 
torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:28:37] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 188.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask batch tensor after cp:torch.Size([1, 1, 8192, 8192]) +tokens torch.Size([1, 2048])batch tensor: + position_ids batch tensor after cp:torch.Size([1, 8192]) + labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor 
after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor: loss_mask 
torch.Size([1, 8192]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor 
after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:28:37] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 204.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 
2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192]) +batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048]) +batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) 
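Throughout this run the per-rank logging shows every sequence-level tensor shrinking from 8192 to 2048 positions once context parallelism is applied (CP_SIZE=4), while the attention mask keeps its full 8192 key dimension and only its query dimension is sliced. A rough sketch of that slicing, using made-up helper names and omitting the chunk interleaving Megatron actually uses for causal load balancing:

import torch

def slice_batch_for_cp_rank(batch: dict, cp_size: int, cp_rank: int) -> dict:
    # Hypothetical illustration: keep this rank's contiguous 1/cp_size slice of the
    # sequence dimension; the attention mask [b, 1, seq, seq] is sliced on its query
    # (row) dimension only, matching the [1, 1, 2048, 8192] shapes in the log.
    sliced = {}
    for name, tensor in batch.items():
        seq_dim = 2 if name == "attention_mask" else 1
        chunk = tensor.size(seq_dim) // cp_size
        sliced[name] = tensor.narrow(seq_dim, cp_rank * chunk, chunk)
    return sliced

batch = {
    "tokens": torch.zeros(1, 8192, dtype=torch.long),
    "labels": torch.zeros(1, 8192, dtype=torch.long),
    "loss_mask": torch.ones(1, 8192),
    "attention_mask": torch.ones(1, 1, 8192, 8192, dtype=torch.bool),
    "position_ids": torch.arange(8192).unsqueeze(0),
}
for k, v in slice_batch_for_cp_rank(batch, cp_size=4, cp_rank=0).items():
    print("batch tensor after cp:", k, v.shape)   # e.g. tokens -> torch.Size([1, 2048])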
+batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+batch tensor: tokens torch.Size([1, 8192]) +batch tensor: labels torch.Size([1, 8192]) +batch tensor: loss_mask torch.Size([1, 8192]) +batch tensor: attention_mask torch.Size([1, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([1, 8192])
+batch tensor after cp: tokens torch.Size([1, 2048]) +batch tensor after cp: labels torch.Size([1, 2048]) +batch tensor after cp: loss_mask torch.Size([1, 2048]) +batch tensor after cp: attention_mask torch.Size([1, 1, 2048, 8192]) +batch tensor after cp: position_ids torch.Size([1, 2048])
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 21:28:37] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 207.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 21:28:37
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.030525684356689453 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.030588626861572266 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.03060150146484375 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.030657052993774414 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.03066253662109375 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes
0.030675888061523438 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.031067371368408203 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.03521132469177246 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.03775477409362793 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.037764787673950195 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.037817955017089844 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.03788876533508301 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03794503211975098 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.038408756256103516 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.03847622871398926 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03847670555114746 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] 
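The distribute_shards_to_ranks lines above report, for each group of 4 ranks doing a fully parallel save, the total bytes assigned to each rank (roughly 205 to 214 MB each), i.e. checkpoint shards are spread so that per-rank write volume stays balanced. A toy illustration of that kind of balancing, with invented shard sizes and a simple greedy heuristic, not the megatron.core implementation:

import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    # Greedy largest-first assignment: always give the next-largest shard to the
    # rank that currently has the fewest bytes assigned.
    heap = [(0, rank) for rank in range(num_ranks)]   # (assigned_bytes, rank)
    heapq.heapify(heap)
    assignment = {}
    for shard_id, size in sorted(enumerate(shard_sizes), key=lambda x: -x[1]):
        assigned, rank = heapq.heappop(heap)
        assignment[shard_id] = rank
        heapq.heappush(heap, (assigned + size, rank))
    return assignment

sizes = [50_000_000, 120_000_000, 80_000_000, 200_000_000, 150_000_000, 60_000_000]  # made-up sizes
print(distribute_shards_to_ranks(sizes, num_ranks=4))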
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +Running ctx_length=12288, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. 
False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. 
False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 12288 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ 
False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 
1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 12288 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... 
True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 
2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... 
False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 12288 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. 
True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
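Note: the "padded vocab" line in the next block (50257 tokens padded with 431 dummies to 50688) follows directly from the arguments above. A minimal sketch of that arithmetic, assuming Megatron's usual rule of padding the tokenizer vocab up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size:

# Sketch only: reproduces the "padded vocab" arithmetic reported below,
# assuming the pad-to-multiple rule (make_vocab_size_divisible_by * TP size).
orig_vocab = 50257                    # GPT2BPETokenizer vocab size
multiple = 128 * 4                    # make_vocab_size_divisible_by * tensor_model_parallel_size
padded_vocab = ((orig_vocab + multiple - 1) // multiple) * multiple
print(padded_vocab, padded_vocab - orig_vocab)   # 50688 431, matching the log line below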
+INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.049 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.619 seconds +time to initialize megatron (seconds): 9.504 +[after megatron is initialized] datetime: 2025-06-21 21:29:54 +building GPT model ... 
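Note: the per-rank parameter count reported in the next block (190,385,152 on every (tensor, pipeline) rank) can be reconstructed from the ctx_length=12288 arguments above. A back-of-the-envelope sketch, assuming the standard Megatron GPT layout (tied embedding/output weights, learned absolute position embeddings, GQA-style fused QKV, biases on the linear layers as listed in the gradient bucket below):

# Sketch only: back-of-the-envelope check of the per-TP-rank parameter count,
# under the layout assumptions stated above.
tp = 4
padded_vocab, hidden, ffn, layers, seq = 50688, 4096, 16384, 2, 12288
heads, groups, kv_ch = 64, 16, 64              # num_attention_heads, num_query_groups, kv_channels

word_emb = padded_vocab // tp * hidden         # word embeddings, split across TP ranks
pos_emb = seq * hidden                         # learned position embeddings, replicated per rank
qkv_out = heads * kv_ch + 2 * groups * kv_ch   # fused QKV output dim with GQA: 6144
per_layer = (2 * hidden                              # QKV layernorm weight + bias
             + (hidden * qkv_out + qkv_out) // tp    # QKV weight + bias, TP-split
             + hidden * hidden // tp + hidden        # attention proj weight (split) + bias
             + 2 * hidden                            # MLP layernorm weight + bias
             + (hidden * ffn + ffn) // tp            # fc1 weight + bias, TP-split
             + ffn * hidden // tp + hidden)          # fc2 weight (split) + bias
total = word_emb + pos_emb + layers * per_layer + 2 * hidden   # plus final layernorm
print(total)                                   # 190385152, matching the counts reported below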
+>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152>>> decoder + +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder>>> embedding +>>> output_layer + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 + +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (190385152 elements, 190385152 padded size): + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.embedding.word_embeddings.weight + 
module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine + loading distributed checkpoint from gpt-checkpoint at iteration 10 +Running ctx_length=16384, TP_SIZE=4, CP_SIZE=4, BATCH_SIZE=1 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 16384 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +CTX_LENGTH: 16384 +TP_SIZE: 4 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 16, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. 
False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 
3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 16384 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. 
False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... 
False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 
16384 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. 
fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... 
False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 16384 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... 
minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ 
None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.043 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.571 seconds +time to initialize megatron (seconds): 9.717 +[after megatron is initialized] datetime: 2025-06-21 21:30:34 +building GPT model ... 
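The "padded vocab" line above comes from Megatron rounding the raw GPT2BPETokenizer vocabulary (50,257 entries) up to a multiple of make-vocab-size-divisible-by times the tensor-model-parallel size, so the word-embedding matrix splits evenly across the 4 TP ranks. A minimal sketch of that rounding, assuming the usual divisor of 128 (the divisor itself is not printed in this log):

def pad_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
    # Round the vocabulary up to the next multiple of divisible_by * tp_size.
    # The divisor of 128 is the common Megatron default and is an assumption here.
    multiple = divisible_by * tp_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

if __name__ == "__main__":
    padded = pad_vocab_size(50257, 128, 4)
    print(padded, padded - 50257)  # 50688 and 431 dummy tokens, matching the log line above

With 50,257 tokens and a multiple of 128 x 4 = 512, the next multiple is 50,688, i.e. the 431 dummy tokens reported above.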
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 207162368
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 207162368
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 207162368
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 207162368
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (207162368 elements, 207162368 padded size):
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.embedding.position_embeddings.weight
module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 207162368 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.32, 3.65) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:30:35 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
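The split_matrix reported by blended_megatron_dataset_config above is just the cumulative normalization of the '1,1,1' train/valid/test weights into [begin, end) fractions of the mock dataset. A small sketch of that normalization (the function name here is illustrative, not the actual Megatron helper):

from typing import List, Tuple

def split_matrix(weights: List[float]) -> List[Tuple[float, float]]:
    # Turn relative split weights (e.g. [1, 1, 1]) into cumulative [begin, end)
    # fractions of the dataset, as in the split_matrix log line above.
    total = sum(weights)
    bounds, acc = [], 0.0
    for w in weights:
        begin = acc
        acc += w / total
        bounds.append((begin, acc))
    # Clamp the final boundary to exactly 1.0 to absorb floating-point rounding.
    bounds[-1] = (bounds[-1][0], 1.0)
    return bounds

print(split_matrix([1, 1, 1]))
# [(0.0, 0.3333...), (0.3333..., 0.6666...), (0.6666..., 1.0)]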
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=16384, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004485 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4162 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001815 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4160 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001571 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 4167 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:30:35 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1120.23, 1140.24) + train/valid/test-data-iterators-setup ..........: (15.21, 134.48) +Setting rerun_state_machine.current_iteration to 0... 
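The "batch tensor" / "batch tensor after cp" pairs printed below show what context parallelism does to each micro-batch: every CP rank keeps a 16384 / CP_SIZE = 4096-token slice of tokens, labels, loss_mask and position_ids, plus the matching query rows of the [1, 1, 16384, 16384] attention mask. A simplified sketch of that sharding, assuming one contiguous slice per rank (Megatron's actual get_batch_on_this_cp_rank assigns each rank two non-adjacent chunks to balance causal-attention work, which this sketch does not reproduce):

import torch

def shard_batch_for_cp(batch: dict, cp_rank: int, cp_size: int) -> dict:
    # Keep only this rank's slice along the sequence dimension.
    # Simplified contiguous split; shapes come out as in the log:
    # [1, 16384] -> [1, 4096] and [1, 1, 16384, 16384] -> [1, 1, 4096, 16384].
    seq_len = batch["tokens"].size(1)
    chunk = seq_len // cp_size
    sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
    out = {}
    for key, t in batch.items():
        if key == "attention_mask":
            out[key] = t[:, :, sl, :]  # slice query rows, keep all key columns
        else:
            out[key] = t[:, sl]
    return out

batch = {
    "tokens": torch.zeros(1, 16384, dtype=torch.long),
    "labels": torch.zeros(1, 16384, dtype=torch.long),
    "loss_mask": torch.ones(1, 16384),
    "attention_mask": torch.ones(1, 1, 16384, 16384, dtype=torch.bool),
    "position_ids": torch.arange(16384).unsqueeze(0),
}
shard = shard_batch_for_cp(batch, cp_rank=0, cp_size=4)
print({k: tuple(v.shape) for k, v in shard.items()})

Either way, the per-rank shapes match the log: [1, 4096] for the 1-D tensors and [1, 1, 4096, 16384] for the mask.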
+[before the start of training step] datetime: 2025-06-21 21:30:35 +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch 
tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens 
torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096])batch tensor after cp: +batch tensor after cp: attention_masktokens torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096])torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) + +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB +[Rank 2] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3616.0 | max reserved: 3616.0[Rank 1] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 + +[Rank 0] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3552.0 | max reserved: 3552.0[Rank 5] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 + + [2025-06-21 21:30:50] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 14784.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 6] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 10] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3584.0 | max reserved: 3584.0[Rank 11] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 
2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 + +[Rank 13] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3616.0 | max reserved: 3616.0 +[Rank 8] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 15] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3616.0 | max reserved: 3616.0 +[Rank 14] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3514.0 | max reserved: 3514.0 +[Rank 9] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3584.0 | max reserved: 3584.0[Rank 12] (after 1 iterations) memory (MB) | allocated: 2841.20751953125 | max allocated: 2841.20849609375 | reserved: 3616.0 | max reserved: 3616.0 + +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor 
after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch 
tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 1 +Done exporting trace 1 + [2025-06-21 21:30:50] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 702.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) 
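Two details in the iteration lines above are worth decoding. First, the "Theoretical memory footprints" value is consistent with roughly 18 bytes per parameter for fp16 training with a non-distributed Adam optimizer (fp16 weight, fp32 main weight, fp32 main gradient, and two fp32 Adam moments): 0.14 B parameters in the most loaded shard x 18 B is about 2403 MB. Second, the loss scale starts at 2**32 = 4294967296 and is halved whenever an iteration overflows in fp16, which is why the early iterations each report "number of skipped iterations: 1" and the scale drops from 4294967296 to 2147483648 and keeps halving while steps are skipped. A minimal stand-in for that halve-on-overflow logic, using the loss_scale_window=1000 growth window from the OptimizerConfig above (Megatron's real scaler also applies the hysteresis=2 setting, which this sketch omits):

class DynamicLossScaler:
    # Minimal stand-in for fp16 dynamic loss scaling as suggested by the log:
    # start at 2**32, halve on overflow (and skip the step), grow again after
    # a window of clean iterations. Not Megatron's actual implementation.
    def __init__(self, initial_scale=2.0**32, min_scale=1.0,
                 growth_interval=1000, backoff_factor=0.5, growth_factor=2.0):
        self.scale = initial_scale
        self.min_scale = min_scale
        self.growth_interval = growth_interval
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        # Returns True if the optimizer step should be skipped.
        if found_overflow:
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps >= self.growth_interval:
            self.scale *= self.growth_factor
            self._good_steps = 0
        return False

scaler = DynamicLossScaler()
for overflow in (True, True, True, True):
    scaler.update(overflow)
print(scaler.scale)  # 268435456.0 == 2**28 after four halvings from 2**32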
+batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 
16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp:batch tensor: attention_masktokens torch.Size([1, 1, 16384, 16384]) +torch.Size([1, 4096])batch tensor: + position_idsbatch tensor after cp: torch.Size([1, 16384])labels + torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 
16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:30:51] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 659.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens 
torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: 
labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:30:52] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 673.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) 
+batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch 
tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels 
torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:30:52] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 688.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: 
position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) 
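The iteration summaries interleaved with these dumps show one skipped iteration per step and a loss scale that halves each time (536870912 -> 268435456 -> 134217728 -> ...). That pattern is the usual fp16 dynamic loss-scaling back-off: when a gradient overflow is detected, the optimizer step is skipped and the scale is reduced. The toy sketch below illustrates that policy; the class and argument names are assumptions for illustration, not Megatron's grad-scaler API.

class DynamicLossScaler:
    """Toy fp16 loss scaler: halve the scale on overflow, grow it again after
    a long enough run of clean steps."""

    def __init__(self, init_scale=2.0 ** 32, backoff=0.5, growth=2.0, growth_interval=1000):
        self.scale = float(init_scale)
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._clean_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Return True if the optimizer step should be skipped."""
        if found_overflow:
            self.scale *= self.backoff        # e.g. 536870912 -> 268435456
            self._clean_steps = 0
            return True
        self._clean_steps += 1
        if self._clean_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return False

Every summary line in this log reports "number of skipped iterations: 1", so the scale keeps halving and no optimizer step is applied during these ten iterations.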
+batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 21:30:53] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 672.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: 
labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch 
tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: 
position_ids torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 21:30:54] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 671.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 
16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: loss_mask 
torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 21:30:54] iteration 8/ 10 | consumed samples: 8 | 
elapsed time per iteration (ms): 639.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: 
loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) 
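Each "batch tensor: attention_mask torch.Size([1, 1, 16384, 16384])" line corresponds to a fully materialized square mask: 16384 * 16384 = 268,435,456 elements, roughly 256 MiB per sample even at one byte per element. The sketch below shows one common way to build such a causal mask (True marking positions to block); it is illustrative only, not the exact dataloader code used here.

import torch

def build_causal_mask(seq_len: int) -> torch.Tensor:
    # Upper-triangular True entries block attention to future positions.
    mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)
    return mask.unsqueeze(0).unsqueeze(0)          # [1, 1, seq_len, seq_len]

# For seq_len=16384 this allocates 16384 * 16384 = 268,435,456 bools (~256 MiB),
# which is why the per-rank mask is only sliced along the query dimension.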
+batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:30:55] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 657.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens 
torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 
16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) +batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +batch tensor: tokens torch.Size([1, 16384]) +batch tensor: labels torch.Size([1, 16384]) +batch tensor: loss_mask torch.Size([1, 16384]) +batch tensor: attention_mask torch.Size([1, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([1, 16384]) +batch tensor after cp: tokens torch.Size([1, 4096]) +batch tensor after cp: labels torch.Size([1, 4096]) +batch tensor after cp: loss_mask torch.Size([1, 4096]) 
+batch tensor after cp: attention_mask torch.Size([1, 1, 4096, 16384]) +batch tensor after cp: position_ids torch.Size([1, 4096]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:30:56] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 631.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:30:56 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.10231971740722656 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.10234379768371582 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.10235023498535156 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.1023705005645752 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.10239291191101074 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.10242295265197754 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.10281801223754883 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.10243630409240723 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.10312891006469727 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.1025242805480957 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.10261702537536621 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.10262656211853027 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.10287857055664062 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.1030571460723877 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.10352897644042969 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.1074376106262207 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(212860928), 1), (np.int64(213909504), 2), (np.int64(205588480), 3)]
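The distribute_shards_to_ranks lines report the same assignment on every rank of the saving group: roughly 200 MB of checkpoint shards per writer rank, [(207618048, 0), (212860928, 1), (213909504, 2), (205588480, 3)]. A size-balanced assignment of that kind can be sketched as a greedy largest-first bin packing; the function below is an illustrative sketch and is not claimed to be Megatron's exact heuristic.

import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    # Greedy size balancing: place the largest remaining shard on the rank
    # that currently holds the fewest bytes.
    heap = [(0, rank) for rank in range(num_ranks)]      # (assigned bytes, rank)
    heapq.heapify(heap)
    per_rank = {rank: [] for rank in range(num_ranks)}
    for size in sorted(shard_sizes, reverse=True):
        load, rank = heapq.heappop(heap)
        per_rank[rank].append(size)
        heapq.heappush(heap, (load + size, rank))
    # Summarize like the DEBUG lines above: (total bytes, rank) pairs.
    totals = sorted((sum(sizes), rank) for rank, sizes in per_rank.items())
    return totals, per_rank

Because every rank runs the same deterministic assignment over the same shard metadata, all sixteen ranks print an identical distribution, and each shard is written by exactly one rank of its group.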