diff --git "a/attnserver.run_attnserver.slurm.sh.343215.out.log" "b/attnserver.run_attnserver.slurm.sh.343215.out.log" --- "a/attnserver.run_attnserver.slurm.sh.343215.out.log" +++ "b/attnserver.run_attnserver.slurm.sh.343215.out.log" @@ -4185,3 +4185,6633 @@ CHECKPOINT_PATH: gpt-checkpoint PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron -------------------------------- /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. 
True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 
0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 
0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. 
False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... 
None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ 
False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 
1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. 
False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.041 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 3.890 seconds +time to initialize megatron (seconds): 11.083 +[after megatron is initialized] datetime: 2025-06-21 22:07:38 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112>>> embedding + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + > number of 
parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (148442112 elements, 148442112 padded size): + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + 
module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +>>> embedding +>>> embedding>>> decoder +>>> output_layer + +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.24, 3.46) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:07:38 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=2048, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006831 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33296 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002681 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33281 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002649 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33343 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:07:38 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (561.82, 584.05) + train/valid/test-data-iterators-setup ..........: (21.04, 180.16) +training ... +Setting rerun_state_machine.current_iteration to 0... 
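A minimal sanity-check sketch of the parallel layout, using only values that appear in this log (world size 32, tensor-model-parallel 4, context-parallel 8, pipeline 1, make_vocab_size_divisible_by 128, and the 8192-token batches printed below); the variable names are illustrative and the snippet is not part of the training run itself:

# All constants below are copied from the argument dump and log lines in this file.
tp, cp, pp, world_size = 4, 8, 1, 32
dp = world_size // (tp * cp * pp)            # 32 / (4*8*1) = 1, matching "data-parallel size: 1"

# Vocab is padded so the embedding splits evenly across tensor-parallel shards;
# with make_vocab_size_divisible_by=128 and tp=4 this reproduces the
# "padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above.
vocab_size = 50257
divisor = 128 * tp                           # 512
padded_vocab = ((vocab_size + divisor - 1) // divisor) * divisor   # 50688
dummy_tokens = padded_vocab - vocab_size     # 431

# Context parallelism shards the sequence dimension across the 8 CP ranks,
# which is why the per-rank batch below shrinks from 8192 tokens to 8192 // 8 = 1024.
full_seq_len = 8192
seq_per_cp_rank = full_seq_len // cp         # 1024

print(dp, padded_vocab, dummy_tokens, seq_per_cp_rank)   # 1 50688 431 1024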
+[before the start of training step] datetime: 2025-06-21 22:07:38 +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor:batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])tokens +batch tensor: position_ids torch.Size([4, 8192]) +torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch 
tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: 
attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens batch tensor after cp: torch.Size([4, 1024])tokens +batch tensor after cp: labelstorch.Size([4, 1024]) +torch.Size([4, 1024]) +batch tensor after cp:batch tensor after cp: labelsloss_mask torch.Size([4, 1024])torch.Size([4, 1024]) + +batch tensor after cp:batch tensor after cp: loss_maskattention_mask torch.Size([4, 1024]) +torch.Size([4, 1, 1024, 8192])batch tensor after cp: +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) + attention_maskbatch tensor after cp: torch.Size([4, 1, 1024, 8192])position_ids + batch tensor after cp:torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens 
torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) 
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Ranks 0-31] (after 1 iterations) memory (MB) | allocated: 2137.64501953125 | max allocated: 2354.22705078125 | reserved: 2766.0-2964.0 | max reserved: 2766.0-2964.0
[Per-rank memory lines consolidated: every rank reports the same allocated and max-allocated values; reserved/max-reserved memory varies between 2766.0 and 2964.0 MB across ranks.]
+ [2025-06-21 22:07:50] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 11642.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
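The "Theoretical memory footprints" figure is consistent with the usual mixed-precision Adam accounting of roughly 18 bytes of weight-plus-optimizer state per parameter (fp16 copy, fp32 master copy, fp32 gradient and Adam moments); that byte breakdown is an assumption here, not something the log states. Applied to the 0.14 B-parameter most-loaded shard it lands on the reported ~2403 MB:

# Back-of-the-envelope check of the theoretical footprint reported above.
# 18 bytes/parameter (fp16 weight + fp32 master weight + Adam state) is an
# assumed breakdown, not a value taken from this log.
params_most_loaded_shard = 0.1400e9
bytes_per_param = 18
print(params_most_loaded_shard * bytes_per_param / 2**20)  # ~2403 MB, vs. 2403.18 MB reported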
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
[Identical shape printouts from the remaining ranks consolidated.]
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 22:07:50] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 109.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
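Across these first iterations the loss scale halves every step (4294967296 -> 2147483648 -> 1073741824 -> ...) while "number of skipped iterations" stays at 1: each step overflows in fp16, the optimizer update is skipped, and the dynamic loss scaler backs off by a factor of 2. A toy sketch of that backoff logic follows; the growth factor and growth interval are illustrative assumptions, not values taken from this run.

class DynamicLossScaler:
    # Halve the scale on overflow, grow it again after a streak of clean steps.
    def __init__(self, initial_scale=2.0 ** 32, backoff=0.5, growth=2.0, growth_interval=1000):
        self.scale = initial_scale      # 2**32 = 4294967296.0, as logged at iteration 1
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow):
        # Returns True when the optimizer step should be skipped.
        if found_overflow:
            self.scale *= self.backoff
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return False

scaler = DynamicLossScaler()
for it in range(1, 5):
    print(f"iteration {it}: loss scale {scaler.scale:.1f}")
    scaler.update(found_overflow=True)  # the early iterations here all overflow

The printed sequence (4294967296.0, 2147483648.0, 1073741824.0, 536870912.0) matches the loss-scale column of iterations 1-4 in this log.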
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
[Identical shape printouts from the remaining ranks consolidated.]
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 22:07:50] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 78.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
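The shape printouts also show why the mask is worth slicing: the full attention mask for one micro-batch is [4, 1, 8192, 8192], while each context-parallel rank only materializes the [4, 1, 1024, 8192] query slice, an 8x reduction. Assuming a 1-byte boolean mask (the dtype is not printed in the log), the sizes work out as follows:

def mask_mib(shape, bytes_per_element=1):
    # Element count times assumed element size, in MiB.
    n = 1
    for d in shape:
        n *= d
    return n * bytes_per_element / 2**20

print(mask_mib((4, 1, 8192, 8192)))  # 256.0 MiB for the full per-micro-batch mask
print(mask_mib((4, 1, 1024, 8192)))  # 32.0 MiB for the per-rank slice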
loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])batch tensor: +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) + batch tensor after cp: tokensposition_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch 
tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor:batch tensor after cp: tokens tokenstorch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp:torch.Size([4, 8192]) loss_mask +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) + torch.Size([4, 1024]) +batch tensor: batch tensor after cp:labels attention_mask torch.Size([4, 8192]) +torch.Size([4, 1, 1024, 8192])batch tensor: +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) + batch tensor after cp:loss_mask position_idstorch.Size([4, 8192]) +torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: batch tensor after cp:attention_mask tokenstorch.Size([4, 1, 8192, 8192]) +batch tensor:torch.Size([4, 1024]) +position_ids batch tensor after cp:torch.Size([4, 8192]) +labels torch.Size([4, 1024]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 
8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor:batch tensor: tokens tokens torch.Size([4, 8192])torch.Size([4, 8192]) + +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor:batch tensor: labelslabels torch.Size([4, 8192])torch.Size([4, 8192]) + +batch tensor: batch tensor:loss_mask loss_masktorch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) + batch tensor:torch.Size([4, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor: tokens batch tensor after cp:torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +attention_mask torch.Size([4, 1, 8192, 8192])batch tensor: + batch tensor:attention_mask position_ids torch.Size([4, 1, 8192, 8192])torch.Size([4, 8192]) + +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +tokens batch tensor:torch.Size([4, 1024]) +labelsbatch tensor after cp: torch.Size([4, 8192])labels +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens 
torch.Size([4, 8192]) + batch tensor:torch.Size([4, 1024]) + batch tensor after cp:loss_mask loss_masktorch.Size([4, 8192]) +torch.Size([4, 1024]) +batch tensor:batch tensor after cp: attention_maskattention_mask torch.Size([4, 1, 8192, 8192])torch.Size([4, 1, 1024, 8192]) + +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor after cp:batch tensor: position_idsposition_ids torch.Size([4, 1024])torch.Size([4, 8192]) + +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp:batch tensor after cp: tokensloss_mask torch.Size([4, 1024])torch.Size([4, 1024]) + +batch tensor after cp: batch tensor after cp:attention_mask labels torch.Size([4, 1, 1024, 8192])torch.Size([4, 1024]) + +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp:batch tensor after cp: position_idsloss_mask torch.Size([4, 1024])torch.Size([4, 1024]) + +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +Start exporting trace 3 +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +Done exporting trace 3 +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: loss_mask torch.Size([4, 
+ [2025-06-21 22:07:50] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 75.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 22:07:50] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 72.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
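The iteration summaries show the fp16 dynamic loss scale halving from 536870912.0 to 268435456.0 while "number of skipped iterations: 1" is reported: the scaler detects an overflow, skips the optimizer step, and backs the scale off by a factor of two. A minimal sketch of that backoff behaviour follows, assuming a standard dynamic loss scaler; DynamicLossScaler here is an illustrative class, not Megatron's, and the initial scale is chosen only so the printed sequence matches the log.

class DynamicLossScaler:
    def __init__(self, init_scale=2.0**32, backoff=0.5, growth_interval=1000):
        self.scale = init_scale
        self.backoff = backoff
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        """Return True if this iteration's optimizer step should be skipped."""
        if found_overflow:
            self.scale *= self.backoff      # halve the scale on overflow
            self._good_steps = 0
            return True                     # iteration counted as skipped
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= 2.0               # cautiously grow back after many clean steps
        return False

scaler = DynamicLossScaler(init_scale=536870912.0 * 2)  # chosen so the first skip prints 536870912.0
for it in range(4, 8):
    skipped = scaler.update(found_overflow=True)
    print(f"iteration {it}: loss scale {scaler.scale:.1f}, skipped={skipped}")
# prints 536870912.0, 268435456.0, 134217728.0, 67108864.0 -- the sequence seen in iterations 4-7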
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 22:07:50] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 71.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
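One reason the per-rank slice matters at this sequence length is the size of the dense attention mask. A rough back-of-the-envelope check, assuming the mask is stored as one byte per element:

full   = 4 * 1 * 8192 * 8192      # batch tensor:          [4, 1, 8192, 8192]
sliced = 4 * 1 * 1024 * 8192      # batch tensor after cp: [4, 1, 1024, 8192]
print(full // 2**20, "MiB")       # 256 MiB per micro-batch for the full mask
print(sliced // 2**20, "MiB")     # 32 MiB per rank after the context-parallel slice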
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 22:07:51] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 69.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
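The "Start exporting trace N" / "Done exporting trace N" markers indicate a per-iteration profiler trace dump. The log does not show which profiler or output path is used; the sketch below is only one plausible way to produce such traces with torch.profiler, and run_iteration_with_trace along with the file naming are assumptions.

import os
from torch.profiler import profile, ProfilerActivity

def run_iteration_with_trace(step_fn, iteration, out_dir="traces"):
    # Profile a single training step, then write a Chrome trace for it.
    os.makedirs(out_dir, exist_ok=True)
    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
        step_fn()
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"{out_dir}/trace_{iteration}.json")
    print(f"Done exporting trace {iteration}")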
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +torch.Size([4, 8192]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 22:07:51] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 95.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) 
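These shape printouts show what the context-parallel split does to each micro-batch: with context-parallel size 8, tokens, labels, loss_mask and position_ids shrink from sequence length 8192 to a per-rank 1024, while attention_mask keeps the full 8192 key dimension and only has its query dimension sliced ([4, 1, 8192, 8192] -> [4, 1, 1024, 8192]). Below is a minimal sketch of that slicing, assuming a plain contiguous split; the function name is illustrative, and Megatron's actual get_batch_on_this_cp_rank uses a different, load-balanced chunking.

```python
import torch

def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
    """Illustrative only: give each context-parallel rank a contiguous
    1/cp_size slice of the sequence dimension. Megatron's real
    get_batch_on_this_cp_rank uses a load-balanced chunking instead."""
    sliced = {}
    for key, tensor in batch.items():
        if key == "attention_mask":
            # mask keeps the full key length; only the query dim is sliced:
            # [b, 1, seq, seq] -> [b, 1, seq // cp_size, seq]
            chunk = tensor.shape[2] // cp_size
            sliced[key] = tensor[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            # tokens / labels / loss_mask / position_ids: [b, seq] -> [b, seq // cp_size]
            chunk = tensor.shape[1] // cp_size
            sliced[key] = tensor[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return sliced

b, seq, cp = 4, 8192, 8
batch = {
    "tokens": torch.zeros(b, seq, dtype=torch.long),
    "labels": torch.zeros(b, seq, dtype=torch.long),
    "loss_mask": torch.ones(b, seq),
    "attention_mask": torch.ones(b, 1, seq, seq, dtype=torch.bool),
    "position_ids": torch.arange(seq).repeat(b, 1),
}
local = slice_batch_for_cp_rank(batch, cp_size=cp, cp_rank=0)
print(local["tokens"].shape)          # torch.Size([4, 1024])
print(local["attention_mask"].shape)  # torch.Size([4, 1, 1024, 8192])
```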
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
[identical "batch tensor" / "batch tensor after cp" shape printouts from the remaining ranks, interleaved in the raw output]
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 22:07:51] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 95.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
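The iteration lines also show the fp16 dynamic loss scaler backing off: every iteration here overflows and is skipped (number of skipped iterations: 1), so the loss scale is halved each step, 33554432.0 -> 16777216.0 -> 8388608.0. Below is a minimal sketch of that backoff rule, assuming a plain halve-on-overflow policy; the growth interval and hysteresis of the real Megatron scaler are omitted.

```python
class DynamicLossScalerSketch:
    """Halve-on-overflow backoff only; the real Megatron scaler also grows
    the scale back after a configurable number of overflow-free steps."""
    def __init__(self, init_scale=33554432.0, backoff_factor=0.5):
        self.scale = float(init_scale)
        self.backoff_factor = backoff_factor

    def step(self, grads_have_overflow):
        if grads_have_overflow:
            self.scale *= self.backoff_factor  # shrink the scale, skip the update
            return False                       # iteration counted as skipped
        return True                            # safe to apply the optimizer step

scaler = DynamicLossScalerSketch(init_scale=33554432.0)
for _ in range(2):
    scaler.step(grads_have_overflow=True)
print(scaler.scale)  # 8388608.0, matching the scale logged at iteration 10
```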
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
[identical "batch tensor" / "batch tensor after cp" shape printouts from the remaining ranks, interleaved in the raw output]
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 22:07:51] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 68.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 22:07:51
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.02483677864074707 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.024903059005737305 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.024919509887695312 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.024935483932495117 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.02494645118713379 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 8, takes 0.025533676147460938 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0248415470123291 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 17, takes 0.024870872497558594 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 19, takes 0.024897098541259766 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.025748014450073242 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.025034189224243164 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.02499842643737793 to prepare state dict for
ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.025005578994750977 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.02538609504699707 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.025545358657836914 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.025400400161743164 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.02540445327758789 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.025728702545166016 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.02543807029724121 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.025483131408691406 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.025476932525634766 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.025934696197509766 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.02591729164123535 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.027007579803466797 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02702951431274414 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.027047157287597656 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.027049779891967773 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.027162790298461914 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.027864456176757812 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.029044151306152344 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.02935624122619629 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.029917240142822266 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
[the "Apply save parallelization" line is repeated verbatim by the remaining ranks]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
[the distribution line above is repeated verbatim by the other ranks in its save group]
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks
distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.482961893081665 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4834692478179932 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4470160007476807 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4452495574951172 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4811763763427734 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4390206336975098 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4464662075042725 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4702668190002441 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4715056419372559 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4701638221740723 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4820420742034912 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.439716100692749 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.439727544784546 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.43961501121521 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4461064338684082 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4711613655090332 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4743096828460693 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4843151569366455 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4398415088653564 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4469671249389648 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.474658727645874 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4820773601531982 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.440687656402588 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.464005947113037 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4472734928131104 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.471203088760376 
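The "distribute_shards_to_ranks distribution" entries above record how the fully-parallel save strategy balances checkpoint shards across the 8 ranks of each save group, reported as (total_bytes, rank) pairs. Below is a minimal sketch of that balancing idea, assuming a greedy largest-first assignment; this is not Megatron's actual implementation, and the shard names and sizes are made up for illustration.

```python
# Minimal sketch of size-balanced shard-to-rank assignment (assumption: greedy
# largest-first placement; this is NOT megatron.core's implementation).
import heapq
from typing import Dict, List, Tuple

def distribute_shards(shard_sizes: Dict[str, int], num_ranks: int) -> List[Tuple[int, int]]:
    """Assign each shard to the currently least-loaded rank; return (total_bytes, rank) per rank."""
    heap: List[Tuple[int, int]] = [(0, rank) for rank in range(num_ranks)]  # (bytes, rank)
    heapq.heapify(heap)
    for _, size in sorted(shard_sizes.items(), key=lambda kv: kv[1], reverse=True):
        total, rank = heapq.heappop(heap)
        heapq.heappush(heap, (total + size, rank))
    return sorted(heap, key=lambda pair: pair[1])  # same (size, rank) shape as the logged distribution

# Hypothetical shard sizes, 8 save ranks per group as in the run above.
print(distribute_shards({"embedding": 207_618_048, "layer.0.qkv": 103_809_024, "layer.0.mlp": 92_274_688}, 8))
```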
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4467337131500244 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.476104736328125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.44096040725708 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.016762495040893555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.4856858253479004 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no 
loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.491856575012207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.008501052856445312 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.004973649978637695 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.004990339279174805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.004965066909790039 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.008748292922973633 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.007332563400268555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.00871896743774414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.008765697479248047 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.007605552673339844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.007337093353271484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.00762486457824707 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.008141040802001953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.008153438568115234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.008071184158325195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.008429527282714844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.007447242736816406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9861696 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.007067441940307617 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9872572 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9872587 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9872577 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9872577 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.00832366943359375 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874434 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874477 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874449 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874494 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874504 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.008446455001831055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.0069196224212646484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9858406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9858458 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874551 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.006329059600830078 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.008190631866455078 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9872713 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.008610963821411133 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9858546 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.986207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.007691144943237305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.986215 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9873016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1975250244140625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.269050598144531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.2928924560546875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.987316 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.007117509841918945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.985863 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.007875919342041016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9874895 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9862268 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.318092346191406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.00859689712524414 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.151199340820312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.793571472167969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.365776062011719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.9604644775390625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.00826263427734375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.888938903808594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.459785461425781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.127357482910156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9859 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9859025 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.914138793945312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.343292236328125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.390975952148438e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.986238 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.555152893066406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.8650970458984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9859128 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.581710815429688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9862597 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.008436203002929688 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.007750511169433594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.034706115722656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.867813110351562e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.580352783203125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010418891906738281 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9921482 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.556510925292969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.891654968261719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 4.696846008300781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9859493 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.003729581832885742 
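Each rank then builds its local save plan (the "plan time" entries), and the async filesystem writer prepares "thread_count: 2" write buckets ("bucket_prep"). The sketch below illustrates splitting planned write items into per-thread buckets, assuming simple least-loaded balancing; item names and sizes are illustrative, not taken from this run.

```python
# Illustrative sketch (assumption, not megatron.core source): splitting planned
# write items into `thread_count` roughly size-balanced buckets before handing
# them to writer workers, mirroring the "thread_count: 2" / "bucket_prep" entries.
from typing import List, Sequence, Tuple

def split_into_buckets(items: Sequence[Tuple[str, int]], thread_count: int) -> List[List[Tuple[str, int]]]:
    """items: (tensor_key, nbytes) pairs; returns one bucket of items per writer thread."""
    buckets: List[List[Tuple[str, int]]] = [[] for _ in range(thread_count)]
    loads = [0] * thread_count
    for key, nbytes in sorted(items, key=lambda kv: kv[1], reverse=True):
        idx = loads.index(min(loads))          # least-loaded bucket so far
        buckets[idx].append((key, nbytes))
        loads[idx] += nbytes
    return buckets

# Hypothetical write items; per-thread byte totals are analogous to the "consumed:" entries later.
buckets = split_into_buckets([("embedding.weight", 51_507_200), ("layer1.qkv", 30_388_224), ("layer1.mlp", 21_938_176)], thread_count=2)
print([sum(n for _, n in b) for b in buckets])
```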
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.128715515136719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.318092346191406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9866261 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.845329284667969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.703636169433594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.961822509765625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.003183603286743164 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543672.9868255 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.512901306152344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05464768409729004 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0425372 rank: 17, write(async) time: 0.0550847053527832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05525040626525879 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0556793212890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.042954 rank: 5, write(async) time: 0.05565166473388672 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0436006 rank: 18, write(async) time: 0.05614113807678223 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05619645118713379 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05587935447692871 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.04391 rank: 7, write(async) time: 0.05664801597595215 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0437255 rank: 20, write(async) time: 0.05626845359802246 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05697345733642578 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0435593 rank: 29, write(async) time: 0.05738711357116699 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05723142623901367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0576014518737793 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0449765 rank: 1, write(async) time: 0.05771350860595703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05779695510864258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0440884 rank: 14, write(async) time: 0.058243751525878906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0586087703704834 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0442564 rank: 27, write(async) time: 0.058042049407958984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059159278869628906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05993795394897461 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.046525 rank: 23, write(async) time: 0.059079885482788086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059841156005859375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.046963 rank: 3, write(async) time: 0.059702157974243164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0462182 rank: 11, write(async) time: 0.06037020683288574 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05881094932556152 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.046561 rank: 25, write(async) time: 0.060321807861328125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06000924110412598 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06010103225708008 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0468583 rank: 21, write(async) time: 0.0594024658203125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.046688 rank: 24, write(async) time: 0.060459136962890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.046384 rank: 9, write(async) time: 0.060520172119140625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06005239486694336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06226515769958496 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06043267250061035 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06010079383850098 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0489902 rank: 28, write(async) time: 0.06278276443481445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0467412 rank: 12, write(async) time: 0.06088399887084961 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0480127 rank: 22, write(async) time: 0.06056666374206543 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.048 rank: 19, write(async) time: 0.06054949760437012 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.060945987701416016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06285738945007324 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0473068 rank: 10, write(async) time: 0.061403512954711914 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0508375 rank: 16, write(async) time: 0.06334567070007324 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06293606758117676 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0492928 rank: 13, write(async) time: 0.06338858604431152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06438541412353516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06548428535461426 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0520866 rank: 4, write(async) time: 0.06476855278015137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0518613 rank: 15, write(async) time: 
0.06590867042541504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06647157669067383 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0531864 rank: 8, write(async) time: 0.06727027893066406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06468987464904785 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0576599 rank: 0, write(async) time: 0.06550836563110352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.09079599380493164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.0780966 rank: 31, write(async) time: 0.09127211570739746 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 2.0503997802734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 2.384185791015625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 2.002716064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 3.6716461181640625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 2.7894973754882812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 5.245208740234375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 3.6716461181640625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.5974044799804688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 2.1457672119140625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.6927719116210938e-05 to finish D2H 
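The "finish D2H" entries mark the device-to-host copy of the sharded state dict, done so the forked writer can serialize host tensors while training resumes on the GPU. A minimal sketch of that step, assuming a flat dict of tensors (not the actual megatron.core code path):

```python
# Illustrative sketch (assumption): copy GPU tensors to host memory before the
# background writer is forked, so the write does not hold GPU buffers.
import torch

def state_dict_to_cpu(state_dict: dict) -> dict:
    """Return a copy of a flat state dict with every tensor moved to host memory."""
    cpu_copy = {}
    for key, value in state_dict.items():
        cpu_copy[key] = value.detach().to("cpu", non_blocking=True) if torch.is_tensor(value) else value
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # ensure the async copies have landed before forking the writer
    return cpu_copy
```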
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.03444480895996094 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.03231358528137207 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.036618947982788086 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.03485298156738281 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.03160595893859863 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.032683372497558594 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.033994436264038086 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.03405117988586426 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.03653669357299805 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03136920928955078 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.03593921661376953 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.032599449157714844 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.03827476501464844 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03569483757019043 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.04137706756591797 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.04234814643859863 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.0432589054107666 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.033316850662231445 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.042670488357543945 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.04404950141906738 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.056891441345214844 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.05979013442993164 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.06510281562805176 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.04015922546386719 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.070953369140625 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.0355222225189209 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.036092519760131836 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.197218656539917 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.1839159 rank: 30, write(async) time: 0.19765329360961914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 2.2411346435546875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04370450973510742 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21938176, before: 1747877888, after: 1769816064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22003712, before: 1754583040, after: 1776586752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22122496, before: 1736761344, after: 1758883840 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.25281262397766113 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.252901554107666 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.240538 rank: 6, write(async) time: 0.2532649040222168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.25293445587158203 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.2405958 rank: 2, write(async) time: 0.25333595275878906 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.2401528 rank: 26, write(async) time: 0.2535238265991211 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21893120, before: 1736159232, after: 1758052352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30388224, before: 1735852032, after: 1766240256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30277632, before: 1754443776, after: 1784721408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51179520, before: 1721806848, after: 1772986368 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 131072, before: 1722695680, after: 1722826752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51322880, before: 1716707328, after: 1768030208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.03899979591369629 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 2.1457672119140625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30420992, before: 1719209984, after: 1749630976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21929984, before: 1747660800, after: 1769590784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47259648, before: 1747972096, after: 1795231744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 2019614720, after: 2019749888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51507200, before: 1747972096, after: 1799479296 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 34537472, before: 1739403264, after: 1773940736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30310400, before: 1733521408, after: 1763831808 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47132672, before: 1716707328, after: 1763840000 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.032918453216552734 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22237184, before: 1755508736, after: 1777745920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51314688, before: 1777389568, after: 1828704256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30670848, before: 1780191232, after: 1810862080 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30437376, before: 1732317184, after: 1762754560 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72142848, before: 1747877888, after: 1820020736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51490816, before: 1731981312, after: 1783472128 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.2411346435546875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30339072, before: 1746178048, after: 1776517120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72318976, before: 1736761344, after: 1809080320 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46854144, before: 1721806848, after: 1768660992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 38633472, before: 1730052096, after: 1768685568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72314880, before: 1754648576, after: 1826963456 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 54460416, before: 1735540736, after: 1790001152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72380416, before: 1746178048, after: 1818558464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72261632, before: 1736159232, after: 1808420864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47124480, before: 1735540736, after: 1782665216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72294400, before: 1754443776, after: 1826738176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 55111680, before: 1777389568, after: 1832501248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72335360, before: 1735852032, after: 1808187392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72351744, before: 1719234560, after: 1791586304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46166016, before: 1731981312, after: 1778147328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72253440, before: 1733521408, after: 1805774848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72392704, before: 1739403264, after: 1811795968 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72343552, before: 1780191232, after: 1852534784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72290304, before: 1730052096, after: 1802342400 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 38645760, before: 1799614464, after: 1838260224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
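The "consumed / before / after" entries above are per-worker resident-memory accounting around each bucket write. The sketch below shows one way such numbers can be produced, assuming the third-party psutil package is available; the real writer may measure this differently, and the path used in the example is hypothetical.

```python
# Illustrative sketch: RSS accounting around a worker's write, producing
# "consumed / before / after" style numbers. Assumes `psutil` is installed;
# `write_fn` stands in for serializing one bucket of tensors to disk.
import psutil

def measure_write(worker_id: int, write_fn) -> int:
    rss = lambda: psutil.Process().memory_info().rss
    before = rss()
    write_fn()
    after = rss()
    print(f"{worker_id} consumed: {after - before}, before: {before}, after: {after}")
    return after - before

# Hypothetical demo write of 8 MiB to a scratch file.
measure_write(0, lambda: open("/tmp/demo_bucket.bin", "wb").write(bytes(8 * 1024 * 1024)))
```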
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.06267333030700684 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 139264, before: 1734193152, after: 1734332416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108998656, before: 1713815552, after: 1822814208 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.032776594161987305 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108982272, before: 1726078976, after: 1835061248 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72323072, before: 1755508736, after: 1827831808 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.371969, rank: 25, write(sync,parallel): 0.25092267990112305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72314880, before: 1732317184, after: 1804632064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72110080, before: 1747685376, after: 1819795456 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3752801, rank: 27, write(sync,parallel): 0.25330400466918945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72101888, before: 1799614464, after: 1871716352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3760338, rank: 17, write(sync,parallel): 0.25316739082336426 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3771536, rank: 29, write(sync,parallel): 0.25546836853027344 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3789234, rank: 23, write(sync,parallel): 0.2604396343231201 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.379971, rank: 11, write(sync,parallel): 0.2464895248413086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3806593, rank: 22, write(sync,parallel): 0.2552635669708252 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3806746, rank: 19, write(sync,parallel): 0.25283384323120117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3826122, rank: 9, write(sync,parallel): 0.25101184844970703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3865767, rank: 13, write(sync,parallel): 0.24812078475952148 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3872244, rank: 15, write(sync,parallel): 
0.24595427513122559 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3890493, rank: 24, write(sync,parallel): 0.2656831741333008 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3936882, rank: 20, write(sync,parallel): 0.2700974941253662 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3928833, rank: 31, write(sync,parallel): 0.22722268104553223 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3940065, rank: 28, write(sync,parallel): 0.25687193870544434 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.3979867, rank: 14, write(sync,parallel): 0.25933337211608887 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4021018, rank: 16, write(sync,parallel): 0.2534937858581543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4061642, rank: 12, write(sync,parallel): 0.27168726921081543 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4116836, rank: 5, write(sync,parallel): 0.2789344787597656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4169705, rank: 4, write(sync,parallel): 0.27622151374816895 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: 
Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51175424, before: 1717305344, after: 1768480768 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.423432, rank: 21, write(sync,parallel): 0.24907588958740234 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46981120, before: 1717305344, after: 1764286464 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4234517, rank: 10, write(sync,parallel): 0.2676539421081543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4282675, rank: 18, write(sync,parallel): 0.2705879211425781 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4273832, rank: 8, write(sync,parallel): 0.261918306350708 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 200704, before: 1728180224, after: 1728380928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.4794226, rank: 30, write(sync,parallel): 0.20805001258850098 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109047808, before: 1728741376, after: 1837789184 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.28s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 49868800, before: 1721692160, after: 1771560960 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47362048, before: 1721692160, after: 1769054208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.5316966, rank: 7, write(sync,parallel): 0.39958620071411133 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.47s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109101056, before: 1728491520, after: 1837592576 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.5775454, rank: 26, 
write(sync,parallel): 0.2131485939025879 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.6144123, rank: 6, write(sync,parallel): 0.28626227378845215 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212328448, before: 1722695680, after: 1935024128 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.6946447, rank: 3, write(sync,parallel): 0.505845308303833 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212422656, before: 2019672064, after: 2232094720 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212312064, before: 1734131712, after: 1946443776 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.60s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.7532747, rank: 0, write(sync,parallel): 0.5169768333435059 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.7586944, rank: 1, write(sync,parallel): 0.5716612339019775 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.65s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.61s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212307968, before: 1728180224, after: 1940488192 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543673.8700364, rank: 2, write(sync,parallel): 0.49771928787231445 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.57s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9106848, 2, gather: 0.0024085044860839844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9107704, 1, gather: 0.11243510246276855 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9096243, 11, gather: 0.4889559745788574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9096441, 9, gather: 0.48592042922973633 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.909782, 14, gather: 0.4612724781036377 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9099069, 13, gather: 0.4738037586212158 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9098308, 10, gather: 0.4377288818359375 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9100294, 12, gather: 0.4512670040130615 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9102323, 8, gather: 0.4281148910522461 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9102752, 15, gather: 0.47650623321533203 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9117115, 23, gather: 0.4922480583190918 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9118257, 21, gather: 0.4402017593383789 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.911034, 25, gather: 0.501164436340332 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9118636, 16, gather: 0.45987987518310547 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9111216, 27, gather: 0.494051456451416 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.912609, 3, gather: 0.16509056091308594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9119565, 18, gather: 0.42877197265625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9119463, 20, gather: 0.47847867012023926 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9119196, 22, gather: 0.4906959533691406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.911993, 19, gather: 0.48819708824157715 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9113932, 30, gather: 0.4015071392059326 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9114, 29, gather: 0.4934060573577881 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9126499, 7, gather: 0.3410475254058838 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.912097, 17, gather: 0.4988725185394287 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9114194, 26, gather: 0.28770899772644043 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9126725, 6, gather: 0.2536735534667969 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9114375, 28, gather: 0.4663529396057129 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9129648, 5, gather: 0.4606592655181885 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9129806, 4, gather: 0.44910287857055664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9115698, 24, gather: 0.47824668884277344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.911694, 31, gather: 0.46849513053894043 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9157865, 0, gather: 0.11443185806274414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543673.9308155, metadata_write: 0.014854669570922852 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5110s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5081s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4601s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4835s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4733s 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4982s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4499s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0248s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1351s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1322s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1861s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4958s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5202s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4818s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4507s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4221s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5225s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5152s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4988s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5144s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4876s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3087s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4891s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4810s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5101s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4697s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5142s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5124s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5001s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2743s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3618s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4623s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0067479610443115234 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.006735324859619141 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.006688833236694336 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.00675654411315918 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.00674748420715332 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0067462921142578125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.006770610809326172 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.006750822067260742 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.0067751407623291016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.006737709045410156 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.006740093231201172 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.006765842437744141 to finalize ckpt save 
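+NOTE: the per-rank write(sync,parallel), gather, metadata_write and finalize timings above only exist as free-form DEBUG lines. A small helper along the following lines can aggregate them into per-phase min/max summaries; this is a sketch written against the line formats visible in this log, not part of Megatron, and the regexes and function name are my own:
+    import re
+    from collections import defaultdict
+
+    # Regexes written against the DEBUG line formats visible in this log (illustrative only).
+    PATTERNS = {
+        "write(sync,parallel)": re.compile(r"rank: \d+, write\(sync,parallel\): ([0-9.]+)"),
+        "gather":               re.compile(r"state_dict_saver:[0-9.]+, \d+, gather: ([0-9.]+)"),
+        "finalize":             re.compile(r"finalize took ([0-9.]+)s"),
+    }
+
+    def summarize_ckpt_timings(log_path):
+        timings = defaultdict(list)
+        with open(log_path) as f:
+            for line in f:
+                for phase, pattern in PATTERNS.items():
+                    match = pattern.search(line)
+                    if match:
+                        timings[phase].append(float(match.group(1)))
+        for phase, values in timings.items():
+            print(f"{phase}: n={len(values)}  min={min(values):.3f}s  max={max(values):.3f}s")
+
+    # e.g. summarize_ckpt_timings("attnserver.run_attnserver.slurm.sh.343215.out.log")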
+DEBUG:megatron.training.checkpointing:rank: 28, takes 0.006662845611572266 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.006812572479248047 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0067975521087646484 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.006803989410400391 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0068416595458984375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.006766080856323242 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.006760835647583008 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0067789554595947266 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.006766557693481445 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.006706714630126953 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.006743431091308594 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.00677180290222168 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.006726264953613281 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.006851911544799805 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.006791114807128906 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.006781578063964844 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.0068776607513427734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.006826162338256836 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.006882667541503906 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.006838321685791016 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 
1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+[the same tokens/labels/loss_mask/attention_mask/position_ids shape dump repeats for the remaining ranks; their concurrent prints are interleaved in the raw log]
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (3227.88, 3229.46)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.103782E+01 | lm loss PPL: 6.218206E+04 |
+----------------------------------------------------------------------------------------------------------------
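+NOTE: the PPL column is just the exponential of the reported LM loss, so the two numbers in the table above are mutually consistent (a one-line check, values copied from the table):
+    import math
+    lm_loss = 1.103782e1                   # lm loss value at iteration 10
+    print(f"{math.exp(lm_loss):.4E}")      # ~6.2182E+04, matching the reported lm loss PPL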
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+batch tensor: tokens torch.Size([4, 8192])
+batch tensor: labels torch.Size([4, 8192])
+batch tensor: loss_mask torch.Size([4, 8192])
+batch tensor: attention_mask torch.Size([4, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([4, 8192])
+batch tensor after cp: tokens torch.Size([4, 1024])
+batch tensor after cp: labels torch.Size([4, 1024])
+batch tensor after cp: loss_mask torch.Size([4, 1024])
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([4, 1024])
+[the same tokens/labels/loss_mask/attention_mask/position_ids shape dump repeats for the remaining ranks; their concurrent prints are interleaved in the raw log]
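+NOTE: the "batch tensor" / "batch tensor after cp" pairs show what context parallelism does to each batch here: every rank keeps seq_len / cp_size = 8192 / 8 = 1024 query positions (and the matching slice of the attention mask's query dimension) while the key dimension stays at the full 8192. A minimal sketch of that slicing, assuming a plain contiguous per-rank split; Megatron's actual layout may differ (e.g. load-balanced chunking for causal attention), so treat this as illustrative only:
+    import torch
+
+    def split_for_cp(batch, cp_rank, cp_size):
+        """Keep this rank's 1/cp_size slice of the sequence dimension (contiguous split, for illustration)."""
+        out = {}
+        for name, t in batch.items():
+            if name == "attention_mask":          # [b, 1, q, k]: slice queries, keep all keys
+                q = t.size(2) // cp_size
+                out[name] = t[:, :, cp_rank * q:(cp_rank + 1) * q, :]
+            else:                                 # [b, s]: slice the sequence dimension
+                s = t.size(1) // cp_size
+                out[name] = t[:, cp_rank * s:(cp_rank + 1) * s]
+        return out
+
+    batch = {
+        "tokens":         torch.zeros(4, 8192, dtype=torch.long),
+        "labels":         torch.zeros(4, 8192, dtype=torch.long),
+        "loss_mask":      torch.ones(4, 8192),
+        "attention_mask": torch.ones(4, 1, 8192, 8192, dtype=torch.bool),
+        "position_ids":   torch.arange(8192).repeat(4, 1),
+    }
+    shard = split_for_cp(batch, cp_rank=0, cp_size=8)
+    # tokens/labels/loss_mask/position_ids -> torch.Size([4, 1024]); attention_mask -> torch.Size([4, 1, 1024, 8192])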
+batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor: tokens torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor: labels torch.Size([4, 8192]) +batch tensor: loss_mask torch.Size([4, 8192]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: attention_mask torch.Size([4, 1, 8192, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor: position_ids torch.Size([4, 8192]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: tokens torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: labels torch.Size([4, 1024]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +batch tensor after cp: loss_mask torch.Size([4, 1024]) +batch tensor after cp: attention_mask torch.Size([4, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([4, 1024]) +Start exporting trace 11 +Done exporting trace 11 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (73.36, 74.05) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.103782E+01 | lm loss PPL: 6.218206E+04 | +---------------------------------------------------------------------------------------------------------- +Running ctx_length=4096, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=4 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +Cleaning up checkpoint directory: gpt-checkpoint +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: 
/mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. 
False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. 
False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ 
False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 
1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... 
True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 
2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... 
False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. 
True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
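The parallel layout printed in the arguments above can be cross-checked with a quick sketch: with 32 GPUs, tensor-model-parallel size 4, context-parallel size 8 and pipeline-model-parallel size 1, the data-parallel size falls out as 32 / (4 * 8 * 1) = 1, matching the "data-parallel size: 1" reported by the run. This is a minimal illustration of that arithmetic, not code from this repository.

    # Sanity check of the parallel topology reported in this log (illustrative only).
    world_size = 32
    tp = 4   # tensor_model_parallel_size
    cp = 8   # context_parallel_size
    pp = 1   # pipeline_model_parallel_size

    assert world_size % (tp * cp * pp) == 0
    dp = world_size // (tp * cp * pp)
    print(f"data-parallel size: {dp}")   # -> 1, as in the log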
+INFO:megatron.training.initialize:Setting logging level to 0
+[the "Setting logging level to 0" INFO line is emitted once per rank; the remaining identical copies are omitted here]
+ > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.043 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.889 seconds
+time to initialize megatron (seconds): 9.479
+[after megatron is initialized] datetime: 2025-06-21 22:08:36
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720
+[the embedding/decoder/output_layer banner and the per-rank parameter count are printed by all 32 ranks (eight copies per tensor-parallel rank); the interleaved repeats are collapsed above]
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (156830720 elements, 156830720 padded size):
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.0.self_attention.linear_proj.weight
+
module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (6.18, 6.67) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:08:37 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
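The per-rank parameter count of 156830720 reported above is consistent with a back-of-the-envelope recount from the arguments (hidden_size 4096, ffn_hidden_size 16384, 2 layers, 64 heads with 16 query groups and kv_channels 64, padded vocab 50688, learned absolute position embeddings, TP=4). The sketch below is an illustrative recomputation, assuming the usual layout in which QKV, the attention projection and both MLP linears are split across the 4 tensor-parallel ranks while layer norms, row-parallel biases and position embeddings are replicated; it is not code from Megatron itself.

    # Illustrative recount of parameters per (tensor, pipeline) rank; values taken from the arguments above.
    hidden, ffn, layers, tp = 4096, 16384, 2, 4
    heads, groups, kv_ch = 64, 16, 64
    padded_vocab, max_pos = 50688, 4096

    qkv_out = heads * kv_ch + 2 * groups * kv_ch           # 4096 + 2048 = 6144 with grouped-query attention
    per_layer = (
        2 * hidden                                          # pre-attention LayerNorm (replicated)
        + qkv_out * hidden // tp + qkv_out // tp            # column-parallel QKV weight + bias
        + hidden * hidden // tp + hidden                    # row-parallel proj weight + full bias
        + 2 * hidden                                        # pre-MLP LayerNorm (replicated)
        + ffn * hidden // tp + ffn // tp                    # column-parallel fc1 weight + bias
        + hidden * ffn // tp + hidden                       # row-parallel fc2 weight + full bias
    )
    total = (
        padded_vocab // tp * hidden                         # word embeddings (vocab-parallel)
        + max_pos * hidden                                  # learned position embeddings (replicated)
        + layers * per_layer
        + 2 * hidden                                        # final LayerNorm
    )
    print(total)                                            # 156830720, matching the log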
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005407 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002260 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002066 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:08:37 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (698.08, 737.77) + train/valid/test-data-iterators-setup ..........: (17.84, 143.95) +training ... +Setting rerun_state_machine.current_iteration to 0... 
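The split_matrix logged in the dataset-config lines above follows directly from the "1,1,1" split: the weights are normalized and turned into cumulative (start, end) fractions. A tiny illustration of that arithmetic, not Megatron's own implementation:

    # Turn a "1,1,1" split string into the cumulative split matrix shown in the log (illustrative).
    weights = [float(w) for w in "1,1,1".split(",")]
    total = sum(weights)
    split_matrix, start, cumulative = [], 0.0, 0.0
    for w in weights:
        cumulative += w
        end = cumulative / total
        split_matrix.append((start, end))
        start = end
    print(split_matrix)
    # [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]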
+[before the start of training step] datetime: 2025-06-21 22:08:37 +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([4, 16384]) +batch tensor: tokens torch.Size([4, 16384]) +batch tensor: labels torch.Size([4, 16384]) +batch tensor: loss_mask torch.Size([4, 16384]) +batch tensor: attention_mask torch.Size([4, 1, 16384, 16384]) +batch 
tensor: position_ids torch.Size([4, 16384])
+batch tensor: tokens torch.Size([4, 16384])
+batch tensor: labels torch.Size([4, 16384])
+batch tensor: loss_mask torch.Size([4, 16384])
+batch tensor: attention_mask torch.Size([4, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([4, 16384])
+batch tensor after cp: tokens torch.Size([4, 2048])
+batch tensor after cp: labels torch.Size([4, 2048])
+batch tensor after cp: loss_mask torch.Size([4, 2048])
+batch tensor after cp: attention_mask torch.Size([4, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([4, 2048])
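The shape change above ([4, 16384] per-rank batch becoming [4, 2048] after the context-parallel split, with cp size 8) can be illustrated with a minimal sketch. This is not the Megatron-LM source: the helper name slice_batch_for_cp_rank and the plain contiguous split are assumptions made for illustration (Megatron's actual implementation also interleaves sequence chunks across cp ranks for load balancing). The sketch only shows why every sequence-length dimension shrinks by the cp factor while the key/value dimension of the attention mask stays at the full 16384.

import torch

def slice_batch_for_cp_rank(batch: dict, cp_rank: int, cp_size: int) -> dict:
    """Keep 1/cp_size of each tensor's sequence dimension for this cp rank."""
    sliced = {}
    for key, tensor in batch.items():
        # attention_mask is [b, 1, s_q, s_kv]: only the query dimension (dim 2)
        # is sharded; the key/value dimension keeps the full sequence length.
        seq_dim = 2 if key == "attention_mask" else 1
        chunk = tensor.size(seq_dim) // cp_size
        sliced[key] = tensor.narrow(seq_dim, cp_rank * chunk, chunk)
    return sliced

if __name__ == "__main__":
    # Small demo sizes; with b=4, s=16384, cp_size=8 the printed shapes match
    # the log: [4, 2048] for tokens/labels/loss_mask/position_ids and
    # [4, 1, 2048, 16384] for attention_mask.
    b, s, cp = 4, 16, 8
    batch = {
        "tokens": torch.zeros(b, s, dtype=torch.long),
        "labels": torch.zeros(b, s, dtype=torch.long),
        "loss_mask": torch.ones(b, s),
        "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
        "position_ids": torch.arange(s).unsqueeze(0).expand(b, -1),
    }
    for k, v in slice_batch_for_cp_rank(batch, cp_rank=0, cp_size=cp).items():
        print("batch tensor after cp:", k, v.shape)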