diff --git "a/attnserver.run_attnserver.slurm.sh.343202.out.log" "b/attnserver.run_attnserver.slurm.sh.343202.out.log" --- "a/attnserver.run_attnserver.slurm.sh.343202.out.log" +++ "b/attnserver.run_attnserver.slurm.sh.343202.out.log" @@ -29940,3 +29940,3955 @@ CHECKPOINT_PATH: gpt-checkpoint PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron -------------------------------- /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 2, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. 
False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 2 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. 
None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 81920 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 
224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. 
None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 81920 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... 
probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ 
None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 81920 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ 
None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ 
True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.042 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... 
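The "padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)" line above follows from rounding the GPT2BPETokenizer vocabulary up to a multiple of make_vocab_size_divisible_by (128) times the tensor-model-parallel size (8), so the word-embedding table divides evenly across the 8 TP ranks. A minimal sketch of that rounding, assuming a plain ceil-to-multiple rule (the helper name is illustrative, not Megatron's API):

```python
def padded_vocab_size(orig_vocab_size, make_vocab_size_divisible_by, tp_size):
    # Round up to a multiple of make_vocab_size_divisible_by * TP size so the
    # embedding table splits evenly across tensor-parallel ranks.
    multiple = make_vocab_size_divisible_by * tp_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = padded_vocab_size(50257, 128, 8)   # values from the arguments above
print(padded, padded - 50257)               # 51200 943 -> matches the log line
```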
+INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.742 seconds +time to initialize megatron (seconds): 8.476 +[after megatron is initialized] datetime: 2025-06-21 21:19:57 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 405861888 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 405861888 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 405861888 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (405861888 elements, 405861888 padded size): + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + 
module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 405861888 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.60, 4.96) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:20:02 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
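The per-rank figure "number of parameters on (tensor, pipeline) model parallel rank (x, 0): 405861888" reported above can be reproduced from the arguments: hidden_size 4096, ffn_hidden_size 16384, 2 layers, padded vocab 51200, max_position_embeddings 81920, 64 attention heads in 16 query groups with kv_channels 64, biases on all linear layers, tied input/output embeddings, and tensor-model-parallel size 8. A back-of-the-envelope sketch, not Megatron's own counter; it assumes the learned position embeddings and the row-parallel biases are replicated on every TP rank, which the bucket parameter listing above is consistent with:

```python
tp = 8                                  # tensor_model_parallel_size
h, ffn, layers = 4096, 16384, 2         # hidden_size, ffn_hidden_size, num_layers
vocab, max_pos = 51200, 81920           # padded vocab, max_position_embeddings
heads, groups, kv_ch = 64, 16, 64       # attention heads, query groups, kv_channels

q_dim = heads * kv_ch                   # 4096: query projection width
qkv_out = q_dim + 2 * groups * kv_ch    # 6144: fused QKV output (column-parallel)

per_rank = vocab // tp * h              # word embeddings, sharded over TP
per_rank += max_pos * h                 # learned position embeddings, replicated
for _ in range(layers):
    per_rank += 2 * h                               # pre-attention LayerNorm w+b
    per_rank += qkv_out // tp * h + qkv_out // tp   # QKV weight + bias
    per_rank += q_dim // tp * h + h                 # attention output proj + bias
    per_rank += 2 * h                               # pre-MLP LayerNorm w+b
    per_rank += ffn // tp * h + ffn // tp           # MLP fc1 weight + bias
    per_rank += ffn // tp * h + h                   # MLP fc2 weight + bias
per_rank += 2 * h                       # final LayerNorm w+b
# output layer reuses the word-embedding weight (untie_embeddings_and_output_weights=False)
print(per_rank)                         # 405861888 -> matches the log
```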
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=81920, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005866 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 832 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001633 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 832 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001336 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 833 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:20:02 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (5272.86, 5297.27) + train/valid/test-data-iterators-setup ..........: (18.31, 122.97) +Setting rerun_state_machine.current_iteration to 0... 
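The "batch tensor" / "batch tensor after cp" pairs printed below show the effect of context parallelism (context-parallel size 2): every rank first sees the full 163840-token sequence with its 163840 x 163840 attention mask, and each CP rank then keeps an 81920-token slice along the query/sequence dimension while the key dimension of the mask stays full. A shape-only sketch of that slicing, assuming a simple contiguous split (Megatron's actual per-CP-rank batch helper interleaves chunks for load balancing, but the resulting shapes are the same); meta tensors keep the 163840 x 163840 mask from ever being materialized:

```python
import torch

cp_size, cp_rank = 2, 0                 # context-parallel group of two ranks
b, s = 2, 163840                        # batch and full sequence length from the log

full_batch = {                          # device="meta": shapes only, no memory used
    "tokens":         torch.empty(b, s, dtype=torch.long, device="meta"),
    "labels":         torch.empty(b, s, dtype=torch.long, device="meta"),
    "loss_mask":      torch.empty(b, s, device="meta"),
    "attention_mask": torch.empty(b, 1, s, s, dtype=torch.bool, device="meta"),
    "position_ids":   torch.empty(b, s, dtype=torch.long, device="meta"),
}

def shard_for_cp(batch, rank, size):
    # Contiguous split along the sequence axis; the attention mask is sliced
    # only on its query dimension (dim 2), so the key dimension stays full.
    out = {}
    for name, t in batch.items():
        seq_dim = 2 if name == "attention_mask" else 1
        chunk = t.shape[seq_dim] // size
        out[name] = t.narrow(seq_dim, rank * chunk, chunk)
    return out

for name, t in shard_for_cp(full_batch, cp_rank, cp_size).items():
    print(name, tuple(t.shape))
# tokens/labels/loss_mask/position_ids -> (2, 81920)
# attention_mask -> (2, 1, 81920, 163840), as in "batch tensor after cp" below
```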
+[before the start of training step] datetime: 2025-06-21 21:20:02 +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: 
attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) 
+batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.0703 +Theoretical memory footprints: weight and optimizer=1206.09 MB +[Rank 4] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0 +[Rank 7] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0 +[Rank 6] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0 +[Rank 3] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0 + [2025-06-21 21:21:32] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 89695.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 13] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0 +[Rank 12] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0 +[Rank 14] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0 +[Rank 
5] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 109988.0 | max reserved: 109988.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 108708.0 | max reserved: 108708.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 110068.0 | max reserved: 110068.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 81598.98193359375 | max allocated: 105358.30126953125 | reserved: 108788.0 | max reserved: 108788.0
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 21:22:08] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 36310.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
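The "batch tensor" / "batch tensor after cp" pairs above show the effect of context parallelism: with context-parallel size 2, each rank keeps only its 81920-token share of the 163840-token sequence, while the attention mask keeps the local query rows against all 163840 key positions. Below is a minimal sketch of that kind of split, using plain contiguous chunking; the helper and its name are assumptions for illustration, and Megatron-LM's real split may interleave chunks for load balancing.

import torch

def split_for_context_parallel(batch, cp_size, cp_rank):
    # Hypothetical helper: keep a contiguous 1/cp_size slice of the sequence
    # dimension on each context-parallel rank. Illustrates why 163840-token
    # inputs appear as 81920-token shards in the log when cp_size == 2.
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # local query rows, but every key position is kept: [b, 1, s/cp, s]
            chunk = t.size(2) // cp_size
            out[name] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            # [b, s] -> [b, s/cp]
            chunk = t.size(1) // cp_size
            out[name] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

# toy shapes (the log above uses seq_len=163840, cp_size=2)
batch = {
    "tokens": torch.zeros(2, 8, dtype=torch.long),
    "attention_mask": torch.ones(2, 1, 8, 8, dtype=torch.bool),
}
shard = split_for_context_parallel(batch, cp_size=2, cp_rank=0)
print(shard["tokens"].shape)          # torch.Size([2, 4])
print(shard["attention_mask"].shape)  # torch.Size([2, 1, 4, 8])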
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 21:22:42] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 34217.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 21:23:16] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 33337.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 21:23:49] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 33400.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
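Every iteration so far reports "number of skipped iterations: 1" and a loss scale that halves each step (2147483648.0 at iteration 2 down to 268435456.0 at iteration 5). That is the usual fp16 dynamic-loss-scaling backoff: on overflow the optimizer step is skipped and the scale is cut in half. The sketch below is a toy version of that rule only; the class name and constants are illustrative and are not Megatron-LM's actual scaler.

class DynamicLossScale:
    # Toy fp16 loss-scale backoff: halve on overflow and skip the step,
    # grow again after a run of clean steps. Constants are illustrative.
    def __init__(self, init_scale=2.0**31, backoff=0.5, growth=2.0, growth_interval=1000):
        self.scale = init_scale
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_overflow: bool) -> bool:
        # Returns True if the optimizer step should be skipped.
        if found_overflow:
            self.scale *= self.backoff
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return False

scaler = DynamicLossScale()
for _ in range(3):  # three overflowing steps
    scaler.update(True)
print(scaler.scale)  # 2147483648.0 halved three times -> 268435456.0, cf. iterations 2 through 5 above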
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 21:24:23] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 33563.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 21:24:57] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 34266.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([2, 163840])
+batch tensor: labels torch.Size([2, 163840])
+batch tensor: loss_mask torch.Size([2, 163840])
+batch tensor: attention_mask torch.Size([2, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([2, 163840])
+batch tensor after cp: tokens torch.Size([2, 81920])
+batch tensor after cp: labels torch.Size([2, 81920])
+batch tensor after cp: loss_mask torch.Size([2, 81920])
+batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840])
+batch tensor after cp: position_ids torch.Size([2, 81920])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 21:25:31] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 33576.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
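The per-iteration summary lines carry enough information to estimate average step time directly from this log. A throwaway parser sketch follows; the regex is matched to the field layout shown above, and the function and variable names are made up for the example.

import re

ITER_RE = re.compile(
    r"iteration\s+(\d+)/\s*\d+ \| consumed samples:\s+(\d+) \| "
    r"elapsed time per iteration \(ms\): ([\d.]+)"
)

def average_step_ms(log_text: str) -> float:
    # Collect the "elapsed time per iteration (ms)" field from every summary line.
    times = [float(m.group(3)) for m in ITER_RE.finditer(log_text)]
    return sum(times) / len(times) if times else float("nan")

sample = (
    "iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 34266.1 |\n"
    "iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 33576.5 |\n"
)
print(average_step_ms(sample))  # ~33921.3 ms per step for these two lines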
163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 
163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask 
torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 21:26:04] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 33596.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor 
after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 
163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +batch tensor: tokens torch.Size([2, 163840]) +batch tensor: labels torch.Size([2, 163840]) +batch tensor: loss_mask torch.Size([2, 163840]) +batch tensor: attention_mask torch.Size([2, 1, 163840, 163840]) +batch tensor: position_ids torch.Size([2, 163840]) +batch tensor after cp: tokens torch.Size([2, 81920]) +batch tensor after cp: labels torch.Size([2, 81920]) +batch tensor after cp: loss_mask torch.Size([2, 81920]) +batch tensor after cp: attention_mask torch.Size([2, 1, 81920, 163840]) +batch tensor after cp: position_ids torch.Size([2, 81920]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 21:26:38] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 33562.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 21:26:38 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.05685257911682129 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.056917667388916016 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.05694460868835449 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.05693840980529785 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.05695843696594238 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.05696606636047363 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.05748248100280762 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.05749201774597168 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.056986331939697266 to prepare state 
dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.05750250816345215 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.05751299858093262 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.05750107765197754 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.057984352111816406 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.060320377349853516 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.06042885780334473 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.07724857330322266 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] 
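The distribute_shards_to_ranks lines above come from the fully parallel checkpoint save (ckpt_fully_parallel_save=True): shards that are replicated across ranks are written only once each, and the work is spread over the replicas, with each logged pair appearing to read as (bytes, rank). A minimal sketch of a size-balanced assignment, using hypothetical shard names and a simplified greedy policy rather than Megatron's actual exchange_utils logic:

from typing import Dict, List, Tuple

def assign_shards(shard_sizes: Dict[str, int], num_ranks: int) -> List[Tuple[int, int]]:
    """Greedily give each shard to the least-loaded rank; return (total_bytes, rank) per rank."""
    load = [0] * num_ranks                                   # bytes assigned to each rank so far
    for _, size in sorted(shard_sizes.items(), key=lambda kv: -kv[1]):
        rank = min(range(num_ranks), key=lambda r: load[r])  # least-loaded rank takes the shard
        load[rank] += size
    return [(load[r], r) for r in range(num_ranks)]

# Two saving ranks splitting this model's fp16 shards end up with per-rank
# totals of the same order as the pairs logged above.
print(assign_shards({"word_embeddings": 209748992, "decoder": 211812352}, num_ranks=2))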
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(209748992), 0), (np.int64(211812352), 1)] +Running ctx_length=98304, TP_SIZE=8, CP_SIZE=2, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 98304 +TP_SIZE: 8 +CP_SIZE: 2 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 98304 +TP_SIZE: 8 +CP_SIZE: 2 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 2, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. 
False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 2 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... 
False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 98304 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... 
False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 
1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 98304 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 
0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 
1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. 
False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 98304 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. 
False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
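The padded-vocab message just below is consistent with make_vocab_size_divisible_by=128 and tensor_model_parallel_size=8 from the arguments above: the GPT-2 vocabulary of 50257 is rounded up to a multiple of 128 * 8 = 1024 so the embedding table splits evenly across the 8 tensor-parallel ranks. A minimal sketch of that rounding (illustrative helper, not the exact Megatron function):

def padded_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
    """Round the vocabulary up so it divides evenly across tensor-parallel ranks."""
    multiple = divisible_by * tp_size                        # 128 * 8 = 1024 for this run
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = padded_vocab_size(50257, divisible_by=128, tp_size=8)
print(padded, padded - 50257)                                # 51200 and 943 dummy tokens, matching the log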
+INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.041 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 2.916 seconds +time to initialize megatron (seconds): 8.545 +[after megatron is initialized] datetime: 2025-06-21 21:28:20 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 472970752 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 472970752 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, 
use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (472970752 elements, 472970752 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.position_embeddings.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 472970752 +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 472970752 +>>> embedding +>>> decoder +>>> 
output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 472970752 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.95, 4.90) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:28:25 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=98304, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005864 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 693 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001602 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 693 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001302 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 694 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:28:25 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (4814.85, 4832.16) + train/valid/test-data-iterators-setup ..........: (18.02, 132.27) +training ... 
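Editor's note on the "batch tensor" / "batch tensor after cp" printouts that follow: with context_parallel_size=2, each CP rank keeps only its share of the sequence dimension, which is why tokens/labels/loss_mask/position_ids shrink from [2, 196608] to [2, 98304] while the attention mask keeps the full key length, [2, 1, 98304, 196608]. The sketch below is illustrative only and assumes a plain contiguous slice per rank; Megatron's real get_batch_on_this_cp_rank load-balances causal attention by interleaving 2*cp_size chunks, and the helper name here is ours, not Megatron's.

# Hypothetical sketch: shard the sequence dimension across CP ranks.
# Contiguous split for clarity; Megatron interleaves chunks instead.
import torch

def slice_for_cp_rank(batch, cp_size, cp_rank):
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # mask is [b, 1, q, k]: shard the query dim, keep keys full length
            q_len = t.size(2)
            shard = q_len // cp_size
            out[name] = t[:, :, cp_rank * shard:(cp_rank + 1) * shard, :]
        else:
            # tokens / labels / loss_mask / position_ids are [b, s]
            s = t.size(1)
            shard = s // cp_size
            out[name] = t[:, cp_rank * shard:(cp_rank + 1) * shard]
    return out

if __name__ == "__main__":
    b, s = 2, 8  # the log above uses s = 196608; kept tiny so this actually runs
    batch = {
        "tokens": torch.zeros(b, s, dtype=torch.long),
        "labels": torch.zeros(b, s, dtype=torch.long),
        "loss_mask": torch.ones(b, s),
        "position_ids": torch.arange(s).repeat(b, 1),
        "attention_mask": torch.tril(torch.ones(s, s, dtype=torch.bool)).expand(b, 1, s, s),
    }
    shard = slice_for_cp_rank(batch, cp_size=2, cp_rank=0)
    for k, v in shard.items():
        print(k, tuple(v.shape))
    # per-token tensors: (2, 4); attention_mask: (2, 1, 4, 8)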
+Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 21:28:25 +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) 
+batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 
98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +batch tensor: tokens torch.Size([2, 196608]) +batch tensor: labels torch.Size([2, 196608]) +batch tensor: loss_mask torch.Size([2, 196608]) +batch tensor: attention_mask torch.Size([2, 1, 196608, 196608]) +batch tensor: position_ids torch.Size([2, 196608]) +batch tensor after cp: tokens torch.Size([2, 98304]) +batch tensor after cp: labels torch.Size([2, 98304]) +batch tensor after cp: loss_mask torch.Size([2, 98304]) +batch tensor after cp: attention_mask torch.Size([2, 1, 98304, 196608]) +batch tensor after cp: position_ids torch.Size([2, 98304]) +Running ctx_length=131072, TP_SIZE=8, CP_SIZE=2, BATCH_SIZE=2 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 131072 +TP_SIZE: 8 +CP_SIZE: 2 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 131072 +TP_SIZE: 8 +CP_SIZE: 2 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 16, data-parallel size: 1, context-parallel size: 2, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... 
None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 2 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... 
False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 131072 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 
1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 16 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 
2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 131072 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... 
False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. 
None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 131072 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... 
False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. 
False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 16 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.039 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.777 seconds +time to initialize megatron (seconds): 8.531 +[after megatron is initialized] datetime: 2025-06-21 21:30:01 +building GPT model ... 
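Editor's note on the "padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)" line above: with make_vocab_size_divisible_by=128 and tensor_model_parallel_size=8, the vocab is rounded up to the smallest multiple of 128*8=1024 at or above 50257, which is 51200, i.e. 943 dummy tokens. A minimal sketch of that arithmetic; the helper name is a stand-in, not the Megatron function.

def pad_vocab_size(orig_vocab_size, make_vocab_size_divisible_by, tp_size):
    """Round the vocab up to a multiple of (divisor * TP size) so the
    word-embedding table splits evenly across tensor-parallel ranks."""
    multiple = make_vocab_size_divisible_by * tp_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = pad_vocab_size(50257, 128, 8)
print(padded, padded - 50257)  # 51200 943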
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (607188480 elements, 607188480 padded size): + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + 
module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_proj.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 607188480 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 607188480 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.77, 4.59) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 21:30:09 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
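Editor's note on the "Let split = 1,1,1" / "Let split_matrix = ..." lines above: the relative split weights are normalized into cumulative (start, end) fractions for the train/valid/test splits. A minimal sketch of that normalization, assuming the final bound is clamped to exactly 1.0 as in the logged values; this is a hypothetical helper, not the Megatron source.

def split_to_matrix(weights):
    """Turn relative split weights, e.g. [1, 1, 1], into cumulative
    (start, end) fractions over [0, 1]."""
    total = float(sum(weights))
    bounds, start = [], 0.0
    for w in weights:
        end = start + w / total
        bounds.append((start, end))
        start = end
    # clamp the last boundary so floating-point rounding cannot leave it below 1.0
    bounds[-1] = (bounds[-1][0], 1.0)
    return bounds

print(split_to_matrix([1, 1, 1]))
# [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]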
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=131072, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.009151 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001593 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001280 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 21:30:10 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (8036.28, 8045.84) + train/valid/test-data-iterators-setup ..........: (27.88, 154.46) +training ... +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 21:30:10 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.18 GiB is free. Including non-PyTorch memory, this process has 134.63 GiB memory in use. Of the allocated memory 132.54 GiB is allocated by PyTorch, and 609.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 238, in setup_batches\n attention_mask[batch_id, :, :token_len, :token_len] = torch.tril(\n ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 5.18 GiB is free. Including non-PyTorch memory, this process has 134.63 GiB memory in use. Of the allocated memory 132.54 GiB is allocated by PyTorch, and 609.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
[ten further identical CUDA OOM warnings and tracebacks from the remaining ranks (local GPUs 0-7), each attempting the same 16.00 GiB allocation in setup_batches with ~5.2 GiB free of a 139.81 GiB total and ~134.6 GiB already in use]
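The 16.00 GiB figure in these tracebacks is exactly the size of one dense attention mask at this run's sequence length: the GPTDatasetConfig above reports sequence_length=131072, and a 131072 x 131072 mask of one-byte (torch.bool) elements is 2^34 bytes = 16 GiB. A minimal sketch in plain PyTorch (the toy batch layout and names are illustrative, not the actual pretrain_gpt_profile.py code) redoes the arithmetic and shows the same tril-assignment pattern at a small length so it runs anywhere:

    import torch

    SEQ_LEN = 131072  # sequence_length from the GPTDatasetConfig logged above

    # One dense causal mask holds SEQ_LEN * SEQ_LEN one-byte (torch.bool) entries.
    dense_mask_bytes = SEQ_LEN * SEQ_LEN
    print(dense_mask_bytes / 2**30, "GiB")  # -> 16.0 GiB, matching "Tried to allocate 16.00 GiB"

    # Same assignment shape as the failing line in setup_batches, at a toy length.
    toy_len = 8
    attention_mask = torch.ones(1, 1, toy_len, toy_len, dtype=torch.bool)
    attention_mask[0, :, :toy_len, :toy_len] = torch.tril(
        torch.ones(toy_len, toy_len, dtype=torch.bool)
    )
    print(attention_mask[0, 0])  # lower-triangular causal pattern

With only ~5.2 GiB free on each GPU after the ~132 GiB already allocated by PyTorch, a single extra 16 GiB tensor cannot be placed, which is why every rank fails at the same point before the first training step completes.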
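The warning's suggested PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True only helps when reserved-but-unallocated memory is large; here it is 609.51 MiB, so the shortfall is a genuine lack of free memory rather than fragmentation. Below is a hedged sketch of two levers this implies, assuming the allocator variable can be set before the process's first CUDA allocation and that whatever consumes the mask could accept it in row chunks instead of the dense form requested by create_attention_mask=True in the config above; the chunking helper is hypothetical, not a Megatron API:

    import os

    # Allocator knob from the warning text; in practice it is exported by the job
    # script before launch. With only ~0.6 GiB reserved-but-unallocated in this log,
    # it is unlikely to recover the missing 16 GiB on its own.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

    import torch

    def causal_mask_rows(row_start: int, row_end: int, seq_len: int) -> torch.Tensor:
        # Build only the requested rows of the causal mask instead of the full
        # seq_len x seq_len triangle; memory scales with the chunk, not seq_len**2.
        rows = torch.arange(row_start, row_end).unsqueeze(1)  # (chunk, 1)
        cols = torch.arange(seq_len).unsqueeze(0)             # (1, seq_len)
        return cols <= rows                                   # True where attention is allowed

    print(causal_mask_rows(0, 4, 8).int())

Shortening the sequence, or skipping the host-built dense mask entirely in favor of an attention kernel with built-in causal masking, are the other obvious ways to avoid materializing the 131072 x 131072 tensor.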